
src/hotspot/share/opto/cfgnode.cpp


  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/objArrayKlass.hpp"
  31 #include "opto/addnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/connode.hpp"
  35 #include "opto/convertnode.hpp"

  36 #include "opto/loopnode.hpp"
  37 #include "opto/machnode.hpp"
  38 #include "opto/movenode.hpp"
  39 #include "opto/narrowptrnode.hpp"
  40 #include "opto/mulnode.hpp"
  41 #include "opto/phaseX.hpp"
  42 #include "opto/regmask.hpp"
  43 #include "opto/runtime.hpp"
  44 #include "opto/subnode.hpp"
  45 #include "opto/vectornode.hpp"
  46 #include "utilities/vmError.hpp"
  47 
  48 // Portions of code courtesy of Clifford Click
  49 
  50 // Optimization - Graph Style
  51 
  52 //=============================================================================
  53 //------------------------------Value------------------------------------------
  54 // Compute the type of the RegionNode.
  55 const Type* RegionNode::Value(PhaseGVN* phase) const {

 376   Node* n = (Node*)phase->C->root();
 377   nstack.push(n);
 378   visited.set(n->_idx);
 379   while (nstack.size() != 0) {
 380     n = nstack.pop();
 381     uint max = n->outcnt();
 382     for (uint i = 0; i < max; i++) {
 383       Node* m = n->raw_out(i);
 384       if (m != NULL && m->is_CFG()) {
 385         if (m == this) {
 386           return false; // We reached the Region node - it is not dead.
 387         }
 388         if (!visited.test_set(m->_idx))
 389           nstack.push(m);
 390       }
 391     }
 392   }
 393   return true; // The Region node is unreachable - it is dead.
 394 }
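// (The walk above is a depth-first search over CFG uses seeded at the
// root: the Region is dead exactly when no control path from the root
// reaches it, which is what the two return sites encode.)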
 395 
 396 bool RegionNode::try_clean_mem_phi(PhaseGVN *phase) {
 397   // Incremental inlining + PhaseStringOpts sometimes produce:
 398   //
 399   // cmpP with 1 top input
 400   //           |
 401   //          If
 402   //         /  \
 403   //   IfFalse  IfTrue  /- Some Node
 404   //         \  /      /    /
 405   //        Region    / /-MergeMem
 406   //             \---Phi
 407   //
 408   //
  409   // PhaseStringOpts expects the Region to go away and be replaced by
  410   // the If's control input, but because there is still a Phi, the
  411   // Region stays in the graph. The top input from the CmpP is
  412   // propagated forward and a useful subgraph goes away. The code
  413   // below replaces the Phi with the MergeMem so that the Region is
  414   // simplified.
 415 
 416   PhiNode* phi = has_unique_phi();
 417   if (phi && phi->type() == Type::MEMORY && req() == 3 && phi->is_diamond_phi(true)) {
 418     MergeMemNode* m = NULL;
 419     assert(phi->req() == 3, "same as region");
 420     for (uint i = 1; i < 3; ++i) {
 421       Node *mem = phi->in(i);
 422       if (mem && mem->is_MergeMem() && in(i)->outcnt() == 1) {
 423         // Nothing is control-dependent on path #i except the region itself.
 424         m = mem->as_MergeMem();
 425         uint j = 3 - i;
 426         Node* other = phi->in(j);
 427         if (other && other == m->base_memory()) {
 428           // m is a successor memory to other, and is not pinned inside the diamond, so push it out.
 429           // This will allow the diamond to collapse completely.
 430           phase->is_IterGVN()->replace_node(phi, m);
 431           return true;
 432         }
 433       }
 434     }
 435   }
 436   return false;
 437 }
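// An illustrative sketch of the transform above: given the diamond
//
//      Region(IfFalse, IfTrue)
//          \
//         Phi(other, m)    with m->is_MergeMem() && m->base_memory() == other
//                          (input order may be swapped)
//
// the Phi is replaced wholesale by m, leaving the Region without Phi
// users so that Ideal() below can remove the now-useless diamond.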
 438 
 439 //------------------------------Ideal------------------------------------------
 440 // Return a node which is more "ideal" than the current node.  Must preserve
 441 // the CFG, but we can still strip out dead paths.
 442 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 443   if( !can_reshape && !in(0) ) return NULL;     // Already degraded to a Copy
 444   assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
 445 
 446   // Check for RegionNode with no Phi users and both inputs come from either
 447   // arm of the same IF.  If found, then the control-flow split is useless.
 448   bool has_phis = false;
 449   if (can_reshape) {            // Need DU info to check for Phi users
 450     has_phis = (has_phi() != NULL);       // Cache result
 451     if (has_phis && try_clean_mem_phi(phase)) {
 452       has_phis = false;
 453     }
 454 
 455     if (!has_phis) {            // No Phi users?  Nothing merging?
 456       for (uint i = 1; i < req()-1; i++) {
 457         Node *if1 = in(i);
 458         if( !if1 ) continue;
 459         Node *iff = if1->in(0);
 460         if( !iff || !iff->is_If() ) continue;
 461         for( uint j=i+1; j<req(); j++ ) {
 462           if( in(j) && in(j)->in(0) == iff &&
 463               if1->Opcode() != in(j)->Opcode() ) {
 464             // Add the IF Projections to the worklist. They (and the IF itself)
 465             // will be eliminated if dead.
 466             phase->is_IterGVN()->add_users_to_worklist(iff);
 467             set_req(i, iff->in(0));// Skip around the useless IF diamond
 468             set_req(j, NULL);
 469             return this;      // Record progress
 470           }
 471         }
 472       }
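// Sketch of the shape removed above: both region inputs are opposite
// projections of one If and no Phi merges anything, so
//
//        If                            If's control input
//       /  \           becomes               |
//   IfTrue  IfFalse    ------->            Region
//       \    /
//       Region         (input i is rerouted, input j is nulled out)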

 842   if (iff1 == iff2) {
 843     igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
 844     igvn->replace_input_of(region, idx1, iff1->in(0));
 845     igvn->replace_input_of(region, idx2, igvn->C->top());
 846     return (region == this); // Remove useless if (both projections map to the same control/value)
 847   }
 848   BoolNode* bol1 = iff1->in(1)->isa_Bool();
 849   BoolNode* bol2 = iff2->in(1)->isa_Bool();
 850   if (bol1 == NULL || bol2 == NULL) {
 851     return false; // No bool inputs found
 852   }
 853   Node* cmp1 = bol1->in(1);
 854   Node* cmp2 = bol2->in(1);
 855   bool commute = false;
 856   if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
 857     return false; // No comparison
 858   } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
 859              cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
 860              cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
 861              cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
 862              cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck()) {
 863     // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
 864     // SubTypeCheck is not commutative
 865     return false;
 866   } else if (cmp1 != cmp2) {
 867     if (cmp1->in(1) == cmp2->in(2) &&
 868         cmp1->in(2) == cmp2->in(1)) {
 869       commute = true; // Same but swapped inputs, commute the test
 870     } else {
 871       return false; // Ifs are not comparing the same values
 872     }
 873   }
 874   proj1 = proj1->other_if_proj();
 875   proj2 = proj2->other_if_proj();
 876   if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
 877          proj2->unique_ctrl_out_or_null() == this) ||
 878         (proj2->unique_ctrl_out_or_null() == iff1 &&
 879          proj1->unique_ctrl_out_or_null() == this))) {
 880     return false; // Ifs are not connected through other projs
 881   }
 882   // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged
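// (That is: the other projection of iff1 falls through into iff2, and the
// other projection of iff2 merges into this Region as well, so every
// projection of both Ifs is accounted for.)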

 923 
 924 //=============================================================================
 925 // note that these functions assume that the _adr_type field is flattened
 926 uint PhiNode::hash() const {
 927   const Type* at = _adr_type;
 928   return TypeNode::hash() + (at ? at->hash() : 0);
 929 }
 930 bool PhiNode::cmp( const Node &n ) const {
 931   return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
 932 }
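// flatten_phi_adr_type() below canonicalizes a memory phi's address type to
// the representative adr_type of its alias class, so that hash() and cmp()
// above treat aliased slices as equal; NULL and TypePtr::BOTTOM are already
// canonical and pass through unchanged.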
 933 static inline
 934 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
 935   if (at == NULL || at == TypePtr::BOTTOM)  return at;
 936   return Compile::current()->alias_type(at)->adr_type();
 937 }
 938 
 939 //----------------------------make---------------------------------------------
 940 // create a new phi with edges matching r and set (initially) to x
 941 PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
 942   uint preds = r->req();   // Number of predecessor paths
 943   assert(t != Type::MEMORY || at == flatten_phi_adr_type(at), "flatten at");
 944   PhiNode* p = new PhiNode(r, t, at);
 945   for (uint j = 1; j < preds; j++) {
 946     // Fill in all inputs, except those which the region does not yet have
 947     if (r->in(j) != NULL)
 948       p->init_req(j, x);
 949   }
 950   return p;
 951 }
 952 PhiNode* PhiNode::make(Node* r, Node* x) {
 953   const Type*    t  = x->bottom_type();
 954   const TypePtr* at = NULL;
 955   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
 956   return make(r, x, t, at);
 957 }
 958 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
 959   const Type*    t  = x->bottom_type();
 960   const TypePtr* at = NULL;
 961   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
 962   return new PhiNode(r, t, at);
 963 }
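// A minimal usage sketch for the factories above (illustrative only;
// 'region', 'old_val', 'new_val' and 'gvn' are hypothetical):
//
//   PhiNode* phi = PhiNode::make(region, old_val); // every present path = old_val
//   phi->set_req(2, new_val);                      // path 2 carries the new value
//   phi = gvn.transform(phi)->as_Phi();
//
// make_blank() instead leaves all data inputs NULL for callers that fill
// in every path themselves.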

1060       np->as_Phi()->verify_adr_type(visited, at);
1061     } else if (n->bottom_type() == Type::TOP
1062                || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
1063       // ignore top inputs
1064     } else {
1065       const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
1066       // recheck phi/non-phi consistency at leaves:
1067       assert((nat != NULL) == (at != NULL), "");
1068       assert(nat == at || nat == TypePtr::BOTTOM,
1069              "adr_type must be consistent at leaves of phi nest");
1070     }
1071   }
1072 }
1073 
1074 // Verify a whole nest of phis rooted at this one.
1075 void PhiNode::verify_adr_type(bool recursive) const {
1076   if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
1077   if (Node::in_dump())               return;  // muzzle asserts when printing
1078 
1079   assert((_type == Type::MEMORY) == (_adr_type != NULL), "adr_type for memory phis only");
1080 
1081   if (!VerifyAliases)       return;  // verify thoroughly only if requested
1082 
1083   assert(_adr_type == flatten_phi_adr_type(_adr_type),
1084          "Phi::adr_type must be pre-normalized");
1085 
1086   if (recursive) {
1087     VectorSet visited;
1088     verify_adr_type(visited, _adr_type);
1089   }
1090 }
1091 #endif
1092 
1093 
1094 //------------------------------Value------------------------------------------
1095 // Compute the type of the PhiNode
1096 const Type* PhiNode::Value(PhaseGVN* phase) const {
1097   Node *r = in(0);              // RegionNode
1098   if( !r )                      // Copy or dead
1099     return in(1) ? phase->type(in(1)) : Type::TOP;

1399 Node* PhiNode::Identity(PhaseGVN* phase) {
1400   // Check for no merging going on
1401   // (There used to be special-case code here when this->region->is_Loop.
 1402   // It would check for a tributary phi on the backedge that equals the main phi
 1403   // trivially, perhaps with a single cast.  The unique_input method
1404   // does all this and more, by reducing such tributaries to 'this'.)
1405   Node* uin = unique_input(phase, false);
1406   if (uin != NULL) {
1407     return uin;
1408   }
1409 
1410   int true_path = is_diamond_phi();
1411   // Delay CMove'ing identity if Ideal has not had the chance to handle unsafe cases, yet.
1412   if (true_path != 0 && !(phase->is_IterGVN() && wait_for_region_igvn(phase))) {
1413     Node* id = is_cmove_id(phase, true_path);
1414     if (id != NULL) {
1415       return id;
1416     }
1417   }
1418 
1419   // Looking for phis with identical inputs.  If we find one that has
1420   // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
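// (A memory phi on a narrower slice whose inputs match the bottom phi
// input-for-input computes the same merged state, so the bottom phi can
// stand in for it.)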
1421   if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
1422       TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
1423     uint phi_len = req();
1424     Node* phi_reg = region();
1425     for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
1426       Node* u = phi_reg->fast_out(i);
1427       if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY &&
1428           u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg &&
1429           u->req() == phi_len) {
1430         for (uint j = 1; j < phi_len; j++) {
1431           if (in(j) != u->in(j)) {
1432             u = NULL;
1433             break;
1434           }
1435         }
1436         if (u != NULL) {
1437           return u;
1438         }

1921         } else if (rc->in(0)->in(1) != NULL &&
1922                    rc->in(0)->in(1)->is_Bool()) {
1923           if (worklist.member(rc->in(0)->in(1))) {
1924             delay = true;
1925           } else if (rc->in(0)->in(1)->in(1) != NULL &&
1926                      rc->in(0)->in(1)->in(1)->is_Cmp()) {
1927             if (worklist.member(rc->in(0)->in(1)->in(1))) {
1928               delay = true;
1929             }
1930           }
1931         }
1932       }
1933     }
1934   }
1935   if (delay) {
1936     worklist.push(this);
1937   }
1938   return delay;
1939 }
1940 
1941 //------------------------------Ideal------------------------------------------
1942 // Return a node which is more "ideal" than the current node.  Must preserve
1943 // the CFG, but we can still strip out dead paths.
1944 Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1945   Node *r = in(0);              // RegionNode
1946   assert(r != NULL && r->is_Region(), "this phi must have a region");
1947   assert(r->in(0) == NULL || !r->in(0)->is_Root(), "not a specially hidden merge");
1948 
1949   // Note: During parsing, phis are often transformed before their regions.
1950   // This means we have to use type_or_null to defend against untyped regions.
1951   if( phase->type_or_null(r) == Type::TOP ) // Dead code?
1952     return NULL;                // No change
1953 
1954   Node *top = phase->C->top();
1955   bool new_phi = (outcnt() == 0); // transforming new Phi
1956   // No change for igvn if new phi is not hooked
1957   if (new_phi && can_reshape)
1958     return NULL;
1959 
 1960   // There are 2 situations when only one valid input of the phi is left

2222           for (uint i = 1; i < req(); i++) {
2223             offset->init_req(i, in(i)->in(AddPNode::Offset));
2224           }
2225           phase->is_IterGVN()->register_new_node_with_optimizer(offset);
2226         }
2227         return new AddPNode(base, address, offset);
2228       }
2229     }
2230   }
2231 
2232   // Split phis through memory merges, so that the memory merges will go away.
2233   // Piggy-back this transformation on the search for a unique input....
2234   // It will be as if the merged memory is the unique value of the phi.
2235   // (Do not attempt this optimization unless parsing is complete.
2236   // It would make the parser's memory-merge logic sick.)
2237   // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
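// The target shape is the one sketched further below: a Phi over MergeMems
// becomes a MergeMem over per-slice Phis, built around a cloned base phi.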
2238   if (progress == NULL && can_reshape && type() == Type::MEMORY) {
2239     // see if this phi should be sliced
2240     uint merge_width = 0;
2241     bool saw_self = false;
2242     for( uint i=1; i<req(); ++i ) {// For all paths in
2243       Node *ii = in(i);
2244       // TOP inputs should not be counted as safe inputs because if the
2245       // Phi references itself through all other inputs then splitting the
 2246       // Phi through memory merges would create a dead loop at a later stage.
2247       if (ii == top) {
2248         return NULL; // Delay optimization until graph is cleaned.
2249       }
2250       if (ii->is_MergeMem()) {
2251         MergeMemNode* n = ii->as_MergeMem();
2252         merge_width = MAX2(merge_width, n->req());
2253         saw_self = saw_self || (n->base_memory() == this);
2254       }
2255     }
2256 
2257     // This restriction is temporarily necessary to ensure termination:
2258     if (!saw_self && adr_type() == TypePtr::BOTTOM)  merge_width = 0;
2259 
2260     if (merge_width > Compile::AliasIdxRaw) {
2261       // found at least one non-empty MergeMem
2262       const TypePtr* at = adr_type();
2263       if (at != TypePtr::BOTTOM) {
2264         // Patch the existing phi to select an input from the merge:
2265         // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2266         //     Phi:AT1(...m1...)
2267         int alias_idx = phase->C->get_alias_index(at);
2268         for (uint i=1; i<req(); ++i) {
2269           Node *ii = in(i);
2270           if (ii->is_MergeMem()) {
2271             MergeMemNode* n = ii->as_MergeMem();
2272             // compress paths and change unreachable cycles to TOP
2273             // If not, we can update the input infinitely along a MergeMem cycle
2274             // Equivalent code is in MemNode::Ideal_common
2275             Node *m  = phase->transform(n);
2276             if (outcnt() == 0) {  // Above transform() may kill us!
2277               return top;
2278             }

2309         if (!saw_safe_input) {
2310           // There is a dead loop: All inputs are either dead or reference back to this phi
2311           return top;
2312         }
2313 
2314         // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
2315         //     MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
2316         PhaseIterGVN* igvn = phase->is_IterGVN();
2317         assert(igvn != NULL, "sanity check");
2318         Node* hook = new Node(1);
2319         PhiNode* new_base = (PhiNode*) clone();
2320         // Must eagerly register phis, since they participate in loops.
2321         igvn->register_new_node_with_optimizer(new_base);
2322         hook->add_req(new_base);
2323 
2324         MergeMemNode* result = MergeMemNode::make(new_base);
2325         for (uint i = 1; i < req(); ++i) {
2326           Node *ii = in(i);
2327           if (ii->is_MergeMem()) {
2328             MergeMemNode* n = ii->as_MergeMem();
2329             for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
2330               // If we have not seen this slice yet, make a phi for it.
2331               bool made_new_phi = false;
2332               if (mms.is_empty()) {
2333                 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
2334                 made_new_phi = true;
2335                 igvn->register_new_node_with_optimizer(new_phi);
2336                 hook->add_req(new_phi);
2337                 mms.set_memory(new_phi);
2338               }
2339               Node* phi = mms.memory();
2340               assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
2341               phi->set_req(i, mms.memory2());
2342             }
2343           }
2344         }
2345         // Distribute all self-loops.
2346         { // (Extra braces to hide mms.)
2347           for (MergeMemStream mms(result); mms.next_non_empty(); ) {
2348             Node* phi = mms.memory();

2427             if (is_decodeN) {
2428               new_ii = new EncodePNode(ii, narrow_t);
2429             } else {
2430               new_ii = new EncodePKlassNode(ii, narrow_t);
2431             }
2432             igvn->register_new_node_with_optimizer(new_ii);
2433           }
2434         }
2435         new_phi->set_req(i, new_ii);
2436       }
2437       igvn->register_new_node_with_optimizer(new_phi, this);
2438       if (is_decodeN) {
2439         progress = new DecodeNNode(new_phi, bottom_type());
2440       } else {
2441         progress = new DecodeNKlassNode(new_phi, bottom_type());
2442       }
2443     }
2444   }
2445 #endif
2446 
2447   // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
2448   if (EnableVectorReboxing && can_reshape && progress == NULL && type()->isa_oopptr()) {
2449     progress = merge_through_phi(this, phase->is_IterGVN());
2450   }
2451 
2452   return progress;              // Return any progress
2453 }
2454 
2455 Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn) {
2456   Node_Stack stack(1);
2457   VectorSet  visited;
2458   Node_List  node_map;
2459 
2460   stack.push(root_phi, 1); // ignore control
2461   visited.set(root_phi->_idx);
2462 
2463   Node* new_phi = new PhiNode(root_phi->in(0), t);
2464   node_map.map(root_phi->_idx, new_phi);
2465 
2466   while (stack.is_nonempty()) {

2519         }
2520       } else if (in->Opcode() == Op_VectorBox) {
2521         VectorBoxNode* vbox = static_cast<VectorBoxNode*>(in);
2522         if (cached_vbox == NULL) {
2523           cached_vbox = vbox;
2524         } else if (vbox->vec_type() != cached_vbox->vec_type()) {
2525           // TODO: vector type mismatch can be handled with additional reinterpret casts
2526           assert(Type::cmp(vbox->vec_type(), cached_vbox->vec_type()) != 0, "inconsistent");
2527           return NULL; // not optimizable: vector type mismatch
2528         } else if (vbox->box_type() != cached_vbox->box_type()) {
2529           assert(Type::cmp(vbox->box_type(), cached_vbox->box_type()) != 0, "inconsistent");
2530           return NULL; // not optimizable: box type mismatch
2531         }
2532       } else {
2533         return NULL; // not optimizable: neither Phi nor VectorBox
2534       }
2535     } else {
2536       stack.pop();
2537     }
2538   }
2539   assert(cached_vbox != NULL, "sanity");
2540   const TypeInstPtr* btype = cached_vbox->box_type();
2541   const TypeVect*    vtype = cached_vbox->vec_type();
2542   Node* new_vbox_phi = clone_through_phi(root_phi, btype, VectorBoxNode::Box,   igvn);
2543   Node* new_vect_phi = clone_through_phi(root_phi, vtype, VectorBoxNode::Value, igvn);
2544   return new VectorBoxNode(igvn->C, new_vbox_phi, new_vect_phi, btype, vtype);
2545 }
2546 
2547 bool PhiNode::is_data_loop(RegionNode* r, Node* uin, const PhaseGVN* phase) {
2548   // First, take the short cut when we know it is a loop and the EntryControl data path is dead.
2549   // The loop node may only have one input because the entry path was removed in PhaseIdealLoop::Dominators().
2550   // Then, check if there is a data loop when the phi references itself directly or through other data nodes.
2551   assert(!r->is_Loop() || r->req() <= 3, "Loop node should have 3 or less inputs");
2552   const bool is_loop = (r->is_Loop() && r->req() == 3);
2553   const Node* top = phase->C->top();
2554   if (is_loop) {
2555     return !uin->eqv_uncast(in(LoopNode::EntryControl));
2556   } else {
 2557     // We have a data loop either when there is an unsafe data reference or when the region is unreachable.
2558     return is_unsafe_data_reference(uin)

2763   return in(0)->in(0);
2764 }
2765 
2766 
2767 #ifndef PRODUCT
2768 void CatchProjNode::dump_spec(outputStream *st) const {
2769   ProjNode::dump_spec(st);
2770   st->print("@bci %d ",_handler_bci);
2771 }
2772 #endif
2773 
2774 //=============================================================================
2775 //------------------------------Identity---------------------------------------
2776 // Check for CreateEx being Identity.
2777 Node* CreateExNode::Identity(PhaseGVN* phase) {
2778   if( phase->type(in(1)) == Type::TOP ) return in(1);
2779   if( phase->type(in(0)) == Type::TOP ) return in(0);
2780   // We only come from CatchProj, unless the CatchProj goes away.
2781   // If the CatchProj is optimized away, then we just carry the
2782   // exception oop through.
2783   CallNode *call = in(1)->in(0)->as_Call();
2784 
2785   return ( in(0)->is_CatchProj() && in(0)->in(0)->in(1) == in(1) )
2786     ? this
2787     : call->in(TypeFunc::Parms);
2788 }
2789 
2790 //=============================================================================
2791 //------------------------------Value------------------------------------------
2792 // Check for being unreachable.
2793 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
2794   if (!in(0) || in(0)->is_top()) return Type::TOP;
2795   return bottom_type();
2796 }
2797 
2798 //------------------------------Ideal------------------------------------------
2799 // Check for no longer being part of a loop
2800 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2801   if (can_reshape && !in(0)->is_Region()) {
2802     // Dead code elimination can sometimes delete this projection so

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/objArrayKlass.hpp"
  31 #include "opto/addnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/connode.hpp"
  35 #include "opto/convertnode.hpp"
  36 #include "opto/inlinetypenode.hpp"
  37 #include "opto/loopnode.hpp"
  38 #include "opto/machnode.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/narrowptrnode.hpp"
  41 #include "opto/mulnode.hpp"
  42 #include "opto/phaseX.hpp"
  43 #include "opto/regmask.hpp"
  44 #include "opto/runtime.hpp"
  45 #include "opto/subnode.hpp"
  46 #include "opto/vectornode.hpp"
  47 #include "utilities/vmError.hpp"
  48 
  49 // Portions of code courtesy of Clifford Click
  50 
  51 // Optimization - Graph Style
  52 
  53 //=============================================================================
  54 //------------------------------Value------------------------------------------
  55 // Compute the type of the RegionNode.
  56 const Type* RegionNode::Value(PhaseGVN* phase) const {

 377   Node* n = (Node*)phase->C->root();
 378   nstack.push(n);
 379   visited.set(n->_idx);
 380   while (nstack.size() != 0) {
 381     n = nstack.pop();
 382     uint max = n->outcnt();
 383     for (uint i = 0; i < max; i++) {
 384       Node* m = n->raw_out(i);
 385       if (m != NULL && m->is_CFG()) {
 386         if (m == this) {
 387           return false; // We reached the Region node - it is not dead.
 388         }
 389         if (!visited.test_set(m->_idx))
 390           nstack.push(m);
 391       }
 392     }
 393   }
 394   return true; // The Region node is unreachable - it is dead.
 395 }
 396 
 397 Node* PhiNode::try_clean_mem_phi(PhaseGVN *phase) {
 398   // Incremental inlining + PhaseStringOpts sometimes produce:
 399   //
 400   // cmpP with 1 top input
 401   //           |
 402   //          If
 403   //         /  \
 404   //   IfFalse  IfTrue  /- Some Node
 405   //         \  /      /    /
 406   //        Region    / /-MergeMem
 407   //             \---Phi
 408   //
 409   //
  410   // PhaseStringOpts expects the Region to go away and be replaced by
  411   // the If's control input, but because there is still a Phi, the
  412   // Region stays in the graph. The top input from the CmpP is
  413   // propagated forward and a useful subgraph goes away. The code
  414   // below replaces the Phi with the MergeMem so that the Region is
  415   // simplified.
 416 
 417   if (type() == Type::MEMORY && is_diamond_phi(true)) {
 418     MergeMemNode* m = NULL;
 419     assert(req() == 3, "same as region");
 420     Node* r = in(0);
 421     for (uint i = 1; i < 3; ++i) {
 422       Node *mem = in(i);
 423       if (mem && mem->is_MergeMem() && r->in(i)->outcnt() == 1) {
 424         // Nothing is control-dependent on path #i except the region itself.
 425         m = mem->as_MergeMem();
 426         uint j = 3 - i;
 427         Node* other = in(j);
 428         if (other && other == m->base_memory()) {
 429           // m is a successor memory to other, and is not pinned inside the diamond, so push it out.
 430           // This will allow the diamond to collapse completely.
 431           return m;
 432         }
 433       }
 434     }
 435   }
 436   return NULL;
 437 }
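// (Unlike the old RegionNode::try_clean_mem_phi(), this version only
// identifies the MergeMem and returns it; the callers -- RegionNode::Ideal()
// and PhiNode::Identity() -- decide how to apply the replacement.)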
 438 
 439 //------------------------------Ideal------------------------------------------
 440 // Return a node which is more "ideal" than the current node.  Must preserve
 441 // the CFG, but we can still strip out dead paths.
 442 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 443   if( !can_reshape && !in(0) ) return NULL;     // Already degraded to a Copy
 444   assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
 445 
 446   // Check for RegionNode with no Phi users and both inputs come from either
 447   // arm of the same IF.  If found, then the control-flow split is useless.
 448   bool has_phis = false;
 449   if (can_reshape) {            // Need DU info to check for Phi users
 450     has_phis = (has_phi() != NULL);       // Cache result
 451     if (has_phis) {
 452       PhiNode* phi = has_unique_phi();
 453       if (phi != NULL) {
 454         Node* m = phi->try_clean_mem_phi(phase);
 455         if (m != NULL) {
 456           phase->is_IterGVN()->replace_node(phi, m);
 457           has_phis = false;
 458         }
 459       }
 460     }
 461 
 462     if (!has_phis) {            // No Phi users?  Nothing merging?
 463       for (uint i = 1; i < req()-1; i++) {
 464         Node *if1 = in(i);
 465         if( !if1 ) continue;
 466         Node *iff = if1->in(0);
 467         if( !iff || !iff->is_If() ) continue;
 468         for( uint j=i+1; j<req(); j++ ) {
 469           if( in(j) && in(j)->in(0) == iff &&
 470               if1->Opcode() != in(j)->Opcode() ) {
 471             // Add the IF Projections to the worklist. They (and the IF itself)
 472             // will be eliminated if dead.
 473             phase->is_IterGVN()->add_users_to_worklist(iff);
 474             set_req(i, iff->in(0));// Skip around the useless IF diamond
 475             set_req(j, NULL);
 476             return this;      // Record progress
 477           }
 478         }
 479       }

 849   if (iff1 == iff2) {
 850     igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
 851     igvn->replace_input_of(region, idx1, iff1->in(0));
 852     igvn->replace_input_of(region, idx2, igvn->C->top());
 853     return (region == this); // Remove useless if (both projections map to the same control/value)
 854   }
 855   BoolNode* bol1 = iff1->in(1)->isa_Bool();
 856   BoolNode* bol2 = iff2->in(1)->isa_Bool();
 857   if (bol1 == NULL || bol2 == NULL) {
 858     return false; // No bool inputs found
 859   }
 860   Node* cmp1 = bol1->in(1);
 861   Node* cmp2 = bol2->in(1);
 862   bool commute = false;
 863   if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
 864     return false; // No comparison
 865   } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
 866              cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
 867              cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
 868              cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
 869              cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck() ||
 870              cmp1->is_FlatArrayCheck() || cmp2->is_FlatArrayCheck()) {
 871     // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
 872     // SubTypeCheck is not commutative
 873     return false;
 874   } else if (cmp1 != cmp2) {
 875     if (cmp1->in(1) == cmp2->in(2) &&
 876         cmp1->in(2) == cmp2->in(1)) {
 877       commute = true; // Same but swapped inputs, commute the test
 878     } else {
 879       return false; // Ifs are not comparing the same values
 880     }
 881   }
 882   proj1 = proj1->other_if_proj();
 883   proj2 = proj2->other_if_proj();
 884   if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
 885          proj2->unique_ctrl_out_or_null() == this) ||
 886         (proj2->unique_ctrl_out_or_null() == iff1 &&
 887          proj1->unique_ctrl_out_or_null() == this))) {
 888     return false; // Ifs are not connected through other projs
 889   }
 890   // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged

 931 
 932 //=============================================================================
 933 // note that these functions assume that the _adr_type field is flattened
 934 uint PhiNode::hash() const {
 935   const Type* at = _adr_type;
 936   return TypeNode::hash() + (at ? at->hash() : 0);
 937 }
 938 bool PhiNode::cmp( const Node &n ) const {
 939   return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
 940 }
 941 static inline
 942 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
 943   if (at == NULL || at == TypePtr::BOTTOM)  return at;
 944   return Compile::current()->alias_type(at)->adr_type();
 945 }
 946 
 947 //----------------------------make---------------------------------------------
 948 // create a new phi with edges matching r and set (initially) to x
 949 PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
 950   uint preds = r->req();   // Number of predecessor paths
 951   assert(t != Type::MEMORY || at == flatten_phi_adr_type(at) || (flatten_phi_adr_type(at) == TypeAryPtr::INLINES && Compile::current()->flattened_accesses_share_alias()), "flatten at");
 952   PhiNode* p = new PhiNode(r, t, at);
 953   for (uint j = 1; j < preds; j++) {
 954     // Fill in all inputs, except those which the region does not yet have
 955     if (r->in(j) != NULL)
 956       p->init_req(j, x);
 957   }
 958   return p;
 959 }
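// (The extra clause in the assert above accounts for the shared flat-array
// slice: while flattened_accesses_share_alias() holds, a flat array element
// address flattens to TypeAryPtr::INLINES rather than to a slice of its own,
// so 'at' need not be a fixed point of flatten_phi_adr_type().)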
 960 PhiNode* PhiNode::make(Node* r, Node* x) {
 961   const Type*    t  = x->bottom_type();
 962   const TypePtr* at = NULL;
 963   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
 964   return make(r, x, t, at);
 965 }
 966 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
 967   const Type*    t  = x->bottom_type();
 968   const TypePtr* at = NULL;
 969   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
 970   return new PhiNode(r, t, at);
 971 }

1068       np->as_Phi()->verify_adr_type(visited, at);
1069     } else if (n->bottom_type() == Type::TOP
1070                || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
1071       // ignore top inputs
1072     } else {
1073       const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
1074       // recheck phi/non-phi consistency at leaves:
1075       assert((nat != NULL) == (at != NULL), "");
1076       assert(nat == at || nat == TypePtr::BOTTOM,
1077              "adr_type must be consistent at leaves of phi nest");
1078     }
1079   }
1080 }
1081 
1082 // Verify a whole nest of phis rooted at this one.
1083 void PhiNode::verify_adr_type(bool recursive) const {
1084   if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
1085   if (Node::in_dump())               return;  // muzzle asserts when printing
1086 
1087   assert((_type == Type::MEMORY) == (_adr_type != NULL), "adr_type for memory phis only");
 1088   // Flat array elements shouldn't get their own memory slice until flattened_accesses_share_alias is cleared.
 1089   // It could be that the graph has no loads/stores and flattened_accesses_share_alias is never cleared. EA could still
 1090   // create per-element Phis, but that wouldn't be a problem as there are no memory accesses for that array.
1091   assert(_adr_type == NULL || _adr_type->isa_aryptr() == NULL ||
1092          _adr_type->is_aryptr()->is_known_instance() ||
1093          !_adr_type->is_aryptr()->is_flat() ||
1094          !Compile::current()->flattened_accesses_share_alias() ||
1095          _adr_type == TypeAryPtr::INLINES, "flat array element shouldn't get its own slice yet");
1096 
1097   if (!VerifyAliases)       return;  // verify thoroughly only if requested
1098 
1099   assert(_adr_type == flatten_phi_adr_type(_adr_type),
1100          "Phi::adr_type must be pre-normalized");
1101 
1102   if (recursive) {
1103     VectorSet visited;
1104     verify_adr_type(visited, _adr_type);
1105   }
1106 }
1107 #endif
1108 
1109 
1110 //------------------------------Value------------------------------------------
1111 // Compute the type of the PhiNode
1112 const Type* PhiNode::Value(PhaseGVN* phase) const {
1113   Node *r = in(0);              // RegionNode
1114   if( !r )                      // Copy or dead
1115     return in(1) ? phase->type(in(1)) : Type::TOP;

1415 Node* PhiNode::Identity(PhaseGVN* phase) {
1416   // Check for no merging going on
1417   // (There used to be special-case code here when this->region->is_Loop.
 1418   // It would check for a tributary phi on the backedge that equals the main phi
 1419   // trivially, perhaps with a single cast.  The unique_input method
1420   // does all this and more, by reducing such tributaries to 'this'.)
1421   Node* uin = unique_input(phase, false);
1422   if (uin != NULL) {
1423     return uin;
1424   }
1425 
1426   int true_path = is_diamond_phi();
1427   // Delay CMove'ing identity if Ideal has not had the chance to handle unsafe cases, yet.
1428   if (true_path != 0 && !(phase->is_IterGVN() && wait_for_region_igvn(phase))) {
1429     Node* id = is_cmove_id(phase, true_path);
1430     if (id != NULL) {
1431       return id;
1432     }
1433   }
1434 
1435   if (phase->is_IterGVN()) {
1436     Node* m = try_clean_mem_phi(phase);
1437     if (m != NULL) {
1438       return m;
1439     }
1440   }
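// (Returning the MergeMem here lets the Phi itself disappear during IGVN
// even before RegionNode::Ideal() gets around to simplifying the diamond.)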
1441 
1442 
1443   // Looking for phis with identical inputs.  If we find one that has
1444   // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
1445   if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
1446       TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
1447     uint phi_len = req();
1448     Node* phi_reg = region();
1449     for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
1450       Node* u = phi_reg->fast_out(i);
1451       if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY &&
1452           u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg &&
1453           u->req() == phi_len) {
1454         for (uint j = 1; j < phi_len; j++) {
1455           if (in(j) != u->in(j)) {
1456             u = NULL;
1457             break;
1458           }
1459         }
1460         if (u != NULL) {
1461           return u;
1462         }

1945         } else if (rc->in(0)->in(1) != NULL &&
1946                    rc->in(0)->in(1)->is_Bool()) {
1947           if (worklist.member(rc->in(0)->in(1))) {
1948             delay = true;
1949           } else if (rc->in(0)->in(1)->in(1) != NULL &&
1950                      rc->in(0)->in(1)->in(1)->is_Cmp()) {
1951             if (worklist.member(rc->in(0)->in(1)->in(1))) {
1952               delay = true;
1953             }
1954           }
1955         }
1956       }
1957     }
1958   }
1959   if (delay) {
1960     worklist.push(this);
1961   }
1962   return delay;
1963 }
1964 
1965 // Push inline type input nodes (and null) down through the phi recursively (can handle data loops).
1966 InlineTypeNode* PhiNode::push_inline_types_through(PhaseGVN* phase, bool can_reshape, ciInlineKlass* vk, bool is_init) {
1967   InlineTypeNode* vt = InlineTypeNode::make_null(*phase, vk)->clone_with_phis(phase, in(0), is_init);
1968   if (can_reshape) {
1969     // Replace phi right away to be able to use the inline
1970     // type node when reaching the phi again through data loops.
1971     PhaseIterGVN* igvn = phase->is_IterGVN();
1972     for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1973       Node* u = fast_out(i);
1974       igvn->rehash_node_delayed(u);
1975       imax -= u->replace_edge(this, vt);
1976       --i;
1977     }
1978     igvn->rehash_node_delayed(this);
1979     assert(outcnt() == 0, "should be dead now");
1980   }
1981   ResourceMark rm;
1982   Node_List casts;
1983   for (uint i = 1; i < req(); ++i) {
1984     Node* n = in(i);
1985     while (n->is_ConstraintCast()) {
1986       casts.push(n);
1987       n = n->in(1);
1988     }
1989     if (phase->type(n)->is_zero_type()) {
1990       n = InlineTypeNode::make_null(*phase, vk);
1991     } else if (n->is_Phi()) {
1992       assert(can_reshape, "can only handle phis during IGVN");
1993       n = phase->transform(n->as_Phi()->push_inline_types_through(phase, can_reshape, vk, is_init));
1994     }
1995     while (casts.size() != 0) {
1996       // Push the cast(s) through the InlineTypeNode
1997       Node* cast = casts.pop()->clone();
1998       cast->set_req_X(1, n->as_InlineType()->get_oop(), phase);
1999       n = n->clone();
2000       n->as_InlineType()->set_oop(phase->transform(cast));
2001       n = phase->transform(n);
2002     }
2003     bool transform = !can_reshape && (i == (req()-1)); // Transform phis on last merge
2004     vt->merge_with(phase, n->as_InlineType(), i, transform);
2005   }
2006   return vt;
2007 }
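// Rough shape of the transform above (an illustrative two-way merge):
//
//   Phi(InlineTypeNode, null)
//     ==>  InlineTypeNode whose oop, is_init and field inputs are phis
//          over the per-path values, with null contributing the default
//          value on its path
//
// clone_with_phis() builds the per-field phis and merge_with() fills in
// the value for each path.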
2008 
2009 //------------------------------Ideal------------------------------------------
2010 // Return a node which is more "ideal" than the current node.  Must preserve
2011 // the CFG, but we can still strip out dead paths.
2012 Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2013   Node *r = in(0);              // RegionNode
2014   assert(r != NULL && r->is_Region(), "this phi must have a region");
2015   assert(r->in(0) == NULL || !r->in(0)->is_Root(), "not a specially hidden merge");
2016 
2017   // Note: During parsing, phis are often transformed before their regions.
2018   // This means we have to use type_or_null to defend against untyped regions.
2019   if( phase->type_or_null(r) == Type::TOP ) // Dead code?
2020     return NULL;                // No change
2021 
2022   Node *top = phase->C->top();
2023   bool new_phi = (outcnt() == 0); // transforming new Phi
2024   // No change for igvn if new phi is not hooked
2025   if (new_phi && can_reshape)
2026     return NULL;
2027 
 2028   // There are 2 situations when only one valid input of the phi is left

2290           for (uint i = 1; i < req(); i++) {
2291             offset->init_req(i, in(i)->in(AddPNode::Offset));
2292           }
2293           phase->is_IterGVN()->register_new_node_with_optimizer(offset);
2294         }
2295         return new AddPNode(base, address, offset);
2296       }
2297     }
2298   }
2299 
2300   // Split phis through memory merges, so that the memory merges will go away.
2301   // Piggy-back this transformation on the search for a unique input....
2302   // It will be as if the merged memory is the unique value of the phi.
2303   // (Do not attempt this optimization unless parsing is complete.
2304   // It would make the parser's memory-merge logic sick.)
2305   // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
2306   if (progress == NULL && can_reshape && type() == Type::MEMORY) {
2307     // see if this phi should be sliced
2308     uint merge_width = 0;
2309     bool saw_self = false;
2310     // TODO revisit this with JDK-8247216
2311     bool mergemem_only = true;
2312     for( uint i=1; i<req(); ++i ) {// For all paths in
2313       Node *ii = in(i);
2314       // TOP inputs should not be counted as safe inputs because if the
2315       // Phi references itself through all other inputs then splitting the
 2316       // Phi through memory merges would create a dead loop at a later stage.
2317       if (ii == top) {
2318         return NULL; // Delay optimization until graph is cleaned.
2319       }
2320       if (ii->is_MergeMem()) {
2321         MergeMemNode* n = ii->as_MergeMem();
2322         merge_width = MAX2(merge_width, n->req());
2323         saw_self = saw_self || (n->base_memory() == this);
2324       } else {
2325         mergemem_only = false;
2326       }
2327     }
2328 
2329     // This restriction is temporarily necessary to ensure termination:
2330     if (!mergemem_only && !saw_self && adr_type() == TypePtr::BOTTOM)  merge_width = 0;
2331 
2332     if (merge_width > Compile::AliasIdxRaw) {
2333       // found at least one non-empty MergeMem
2334       const TypePtr* at = adr_type();
2335       if (at != TypePtr::BOTTOM) {
2336         // Patch the existing phi to select an input from the merge:
2337         // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2338         //     Phi:AT1(...m1...)
2339         int alias_idx = phase->C->get_alias_index(at);
2340         for (uint i=1; i<req(); ++i) {
2341           Node *ii = in(i);
2342           if (ii->is_MergeMem()) {
2343             MergeMemNode* n = ii->as_MergeMem();
2344             // compress paths and change unreachable cycles to TOP
2345             // If not, we can update the input infinitely along a MergeMem cycle
2346             // Equivalent code is in MemNode::Ideal_common
2347             Node *m  = phase->transform(n);
2348             if (outcnt() == 0) {  // Above transform() may kill us!
2349               return top;
2350             }

2381         if (!saw_safe_input) {
2382           // There is a dead loop: All inputs are either dead or reference back to this phi
2383           return top;
2384         }
2385 
2386         // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
2387         //     MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
2388         PhaseIterGVN* igvn = phase->is_IterGVN();
2389         assert(igvn != NULL, "sanity check");
2390         Node* hook = new Node(1);
2391         PhiNode* new_base = (PhiNode*) clone();
2392         // Must eagerly register phis, since they participate in loops.
2393         igvn->register_new_node_with_optimizer(new_base);
2394         hook->add_req(new_base);
2395 
2396         MergeMemNode* result = MergeMemNode::make(new_base);
2397         for (uint i = 1; i < req(); ++i) {
2398           Node *ii = in(i);
2399           if (ii->is_MergeMem()) {
2400             MergeMemNode* n = ii->as_MergeMem();
2401             if (igvn) {
2402               // TODO revisit this with JDK-8247216
2403               // Put 'n' on the worklist because it might be modified by MergeMemStream::iteration_setup
2404               igvn->_worklist.push(n);
2405             }
2406             for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
2407               // If we have not seen this slice yet, make a phi for it.
2408               bool made_new_phi = false;
2409               if (mms.is_empty()) {
2410                 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
2411                 made_new_phi = true;
2412                 igvn->register_new_node_with_optimizer(new_phi);
2413                 hook->add_req(new_phi);
2414                 mms.set_memory(new_phi);
2415               }
2416               Node* phi = mms.memory();
2417               assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
2418               phi->set_req(i, mms.memory2());
2419             }
2420           }
2421         }
2422         // Distribute all self-loops.
2423         { // (Extra braces to hide mms.)
2424           for (MergeMemStream mms(result); mms.next_non_empty(); ) {
2425             Node* phi = mms.memory();

2504             if (is_decodeN) {
2505               new_ii = new EncodePNode(ii, narrow_t);
2506             } else {
2507               new_ii = new EncodePKlassNode(ii, narrow_t);
2508             }
2509             igvn->register_new_node_with_optimizer(new_ii);
2510           }
2511         }
2512         new_phi->set_req(i, new_ii);
2513       }
2514       igvn->register_new_node_with_optimizer(new_phi, this);
2515       if (is_decodeN) {
2516         progress = new DecodeNNode(new_phi, bottom_type());
2517       } else {
2518         progress = new DecodeNKlassNode(new_phi, bottom_type());
2519       }
2520     }
2521   }
2522 #endif
2523 
2524   // Check recursively if inputs are either an inline type, constant null
2525   // or another Phi (including self references through data loops). If so,
2526   // push the inline types down through the phis to enable folding of loads.
2527   if (EnableValhalla && (_type->isa_ptr() || _type->isa_inlinetype()) && req() > 2) {
2528     ResourceMark rm;
2529     Unique_Node_List worklist;
2530     worklist.push(this);
2531     bool can_optimize = true;
2532     ciInlineKlass* vk = NULL;
2533     // true if all IsInit inputs of all InlineType* nodes are true
2534     bool is_init = true;
2535     Node_List casts;
2536 
2537     // TODO 8284443 We need to prevent endless pushing through
2538     // TODO 8284443 We could revisit the same node over and over again, right?
2539     // TestLWorld -XX:+UseZGC -DScenarios=0 -DTest=test69
2540     // TestLWorld -XX:-TieredCompilation -XX:-DoEscapeAnalysis -XX:+AlwaysIncrementalInline
2541     bool only_phi = (outcnt() != 0);
2542     for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
2543       Node* n = fast_out(i);
2544       if (n->is_InlineType() && n->in(1) == this) {
2545         can_optimize = false;
2546         break;
2547       }
2548       if (!n->is_Phi()) {
2549         only_phi = false;
2550       }
2551     }
2552     if (only_phi) {
2553       can_optimize = false;
2554     }
2555     for (uint next = 0; next < worklist.size() && can_optimize; next++) {
2556       Node* phi = worklist.at(next);
2557       for (uint i = 1; i < phi->req() && can_optimize; i++) {
2558         Node* n = phi->in(i);
2559         if (n == NULL) {
2560           can_optimize = false;
2561           break;
2562         }
2563         while (n->is_ConstraintCast()) {
2564           if (n->in(0) != NULL && n->in(0)->is_top()) {
2565             // Will die, don't optimize
2566             can_optimize = false;
2567             break;
2568           }
2569           casts.push(n);
2570           n = n->in(1);
2571         }
2572         const Type* t = phase->type(n);
2573         if (n->is_InlineType() && (vk == NULL || vk == t->inline_klass())) {
2574           vk = (vk == NULL) ? t->inline_klass() : vk;
2575           if (phase->find_int_con(n->as_InlineType()->get_is_init(), 0) != 1) {
2576             is_init = false;
2577           }
2578         } else if (n->is_Phi() && can_reshape && (n->bottom_type()->isa_ptr() || n->bottom_type()->isa_inlinetype())) {
2579           worklist.push(n);
2580         } else if (t->is_zero_type()) {
2581           is_init = false;
2582         } else {
2583           can_optimize = false;
2584         }
2585       }
2586     }
2587     // Check if cast nodes can be pushed through
2588     const Type* t = Type::get_const_type(vk);
2589     while (casts.size() != 0 && can_optimize && t != NULL) {
2590       Node* cast = casts.pop();
2591       if (t->filter(cast->bottom_type()) == Type::TOP) {
2592         can_optimize = false;
2593       }
2594     }
2595     if (can_optimize && vk != NULL) {
2596 // TODO 8275400
2597 //      assert(!_type->isa_ptr() || _type->maybe_null() || is_init, "Phi not null but a possible null was seen");
2598       return push_inline_types_through(phase, can_reshape, vk, is_init);
2599     }
2600   }
2601 
2602   // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
2603   if (EnableVectorReboxing && can_reshape && progress == NULL && type()->isa_oopptr()) {
2604     progress = merge_through_phi(this, phase->is_IterGVN());
2605   }
2606 
2607   return progress;              // Return any progress
2608 }
2609 
2610 Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn) {
2611   Node_Stack stack(1);
2612   VectorSet  visited;
2613   Node_List  node_map;
2614 
2615   stack.push(root_phi, 1); // ignore control
2616   visited.set(root_phi->_idx);
2617 
2618   Node* new_phi = new PhiNode(root_phi->in(0), t);
2619   node_map.map(root_phi->_idx, new_phi);
2620 
2621   while (stack.is_nonempty()) {

2674         }
2675       } else if (in->Opcode() == Op_VectorBox) {
2676         VectorBoxNode* vbox = static_cast<VectorBoxNode*>(in);
2677         if (cached_vbox == NULL) {
2678           cached_vbox = vbox;
2679         } else if (vbox->vec_type() != cached_vbox->vec_type()) {
2680           // TODO: vector type mismatch can be handled with additional reinterpret casts
2681           assert(Type::cmp(vbox->vec_type(), cached_vbox->vec_type()) != 0, "inconsistent");
2682           return NULL; // not optimizable: vector type mismatch
2683         } else if (vbox->box_type() != cached_vbox->box_type()) {
2684           assert(Type::cmp(vbox->box_type(), cached_vbox->box_type()) != 0, "inconsistent");
2685           return NULL; // not optimizable: box type mismatch
2686         }
2687       } else {
2688         return NULL; // not optimizable: neither Phi nor VectorBox
2689       }
2690     } else {
2691       stack.pop();
2692     }
2693   }
2694 
2695   assert(cached_vbox != NULL, "sanity");
2696   const TypeInstPtr* btype = cached_vbox->box_type();
2697   const TypeVect*    vtype = cached_vbox->vec_type();
2698   Node* new_vbox_phi = clone_through_phi(root_phi, btype, VectorBoxNode::Box,   igvn);
2699   Node* new_vect_phi = clone_through_phi(root_phi, vtype, VectorBoxNode::Value, igvn);
2700   return new VectorBoxNode(igvn->C, new_vbox_phi, new_vect_phi, btype, vtype);
2701 }
2702 
2703 bool PhiNode::is_data_loop(RegionNode* r, Node* uin, const PhaseGVN* phase) {
2704   // First, take the short cut when we know it is a loop and the EntryControl data path is dead.
2705   // The loop node may only have one input because the entry path was removed in PhaseIdealLoop::Dominators().
2706   // Then, check if there is a data loop when the phi references itself directly or through other data nodes.
2707   assert(!r->is_Loop() || r->req() <= 3, "Loop node should have 3 or less inputs");
2708   const bool is_loop = (r->is_Loop() && r->req() == 3);
2709   const Node* top = phase->C->top();
2710   if (is_loop) {
2711     return !uin->eqv_uncast(in(LoopNode::EntryControl));
2712   } else {
 2713     // We have a data loop either when there is an unsafe data reference or when the region is unreachable.
2714     return is_unsafe_data_reference(uin)

2919   return in(0)->in(0);
2920 }
2921 
2922 
2923 #ifndef PRODUCT
2924 void CatchProjNode::dump_spec(outputStream *st) const {
2925   ProjNode::dump_spec(st);
2926   st->print("@bci %d ",_handler_bci);
2927 }
2928 #endif
2929 
2930 //=============================================================================
2931 //------------------------------Identity---------------------------------------
2932 // Check for CreateEx being Identity.
2933 Node* CreateExNode::Identity(PhaseGVN* phase) {
2934   if( phase->type(in(1)) == Type::TOP ) return in(1);
2935   if( phase->type(in(0)) == Type::TOP ) return in(0);
2936   // We only come from CatchProj, unless the CatchProj goes away.
2937   // If the CatchProj is optimized away, then we just carry the
2938   // exception oop through.
2939 
2940   // CheckCastPPNode::Ideal() for inline types reuses the exception
2941   // paths of a call to perform an allocation: we can see a Phi here.
2942   if (in(1)->is_Phi()) {
2943     return this;
2944   }
2945   CallNode *call = in(1)->in(0)->as_Call();
2946 
2947   return ( in(0)->is_CatchProj() && in(0)->in(0)->in(1) == in(1) )
2948     ? this
2949     : call->in(TypeFunc::Parms);
2950 }
2951 
2952 //=============================================================================
2953 //------------------------------Value------------------------------------------
2954 // Check for being unreachable.
2955 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
2956   if (!in(0) || in(0)->is_top()) return Type::TOP;
2957   return bottom_type();
2958 }
2959 
2960 //------------------------------Ideal------------------------------------------
2961 // Check for no longer being part of a loop
2962 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2963   if (can_reshape && !in(0)->is_Region()) {
2964     // Dead code elimination can sometimes delete this projection so