src/hotspot/share/opto/cfgnode.cpp

   1 /*
   2  * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/objArrayKlass.hpp"
  31 #include "opto/addnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/connode.hpp"
  35 #include "opto/convertnode.hpp"
  36 #include "opto/loopnode.hpp"
  37 #include "opto/machnode.hpp"
  38 #include "opto/movenode.hpp"
  39 #include "opto/narrowptrnode.hpp"
  40 #include "opto/mulnode.hpp"
  41 #include "opto/phaseX.hpp"
  42 #include "opto/regalloc.hpp"
  43 #include "opto/regmask.hpp"
  44 #include "opto/runtime.hpp"
  45 #include "opto/subnode.hpp"
  46 #include "opto/vectornode.hpp"
  47 #include "utilities/vmError.hpp"
  48 
  49 // Portions of code courtesy of Clifford Click
  50 
  51 // Optimization - Graph Style
  52 
  53 //=============================================================================
  54 //------------------------------Value------------------------------------------
  55 // Compute the type of the RegionNode.

 418     } else if (n->is_NeverBranch()) {
 419       // Only follow the loop-internal projection, not the NeverBranch exit
 420       ProjNode* proj = n->as_NeverBranch()->proj_out_or_null(0);
 421       assert(proj != nullptr, "must find loop-internal projection of NeverBranch");
 422       worklist.push(proj);
 423     } else {
 424       // Traverse all CFG outputs
 425       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 426         Node* use = n->fast_out(i);
 427         if (use->is_CFG()) {
 428           worklist.push(use);
 429         }
 430       }
 431     }
 432   }
 433   // No exit found for any loop -> all are infinite
 434   return true;
 435 }
 436 #endif //ASSERT
 437 
 438 bool RegionNode::try_clean_mem_phi(PhaseGVN *phase) {
 439   // Incremental inlining + PhaseStringOpts sometimes produce:
 440   //
 441   // cmpP with 1 top input
 442   //           |
 443   //          If
 444   //         /  \
 445   //   IfFalse  IfTrue  /- Some Node
 446   //         \  /      /    /
 447   //        Region    / /-MergeMem
 448   //             \---Phi
 449   //
 450   //
 451   // It's expected by PhaseStringOpts that the Region goes away and is
 452   // replaced by the If's control input, but because there's still a Phi,
 453   // the Region stays in the graph. The top input from the cmpP is
 454   // propagated forward and a useful subgraph goes away. The code below
 455   // replaces the Phi with the MergeMem so that the Region is
 456   // simplified.
 457 
 458   PhiNode* phi = has_unique_phi();
 459   if (phi && phi->type() == Type::MEMORY && req() == 3 && phi->is_diamond_phi(true)) {
 460     MergeMemNode* m = NULL;
 461     assert(phi->req() == 3, "same as region");
 462     for (uint i = 1; i < 3; ++i) {
 463       Node *mem = phi->in(i);
 464       if (mem && mem->is_MergeMem() && in(i)->outcnt() == 1) {
 465         // Nothing is control-dependent on path #i except the region itself.
 466         m = mem->as_MergeMem();
 467         uint j = 3 - i;
 468         Node* other = phi->in(j);
 469         if (other && other == m->base_memory()) {
 470           // m is a successor memory to other, and is not pinned inside the diamond, so push it out.
 471           // This will allow the diamond to collapse completely.
 472           phase->is_IterGVN()->replace_node(phi, m);
 473           return true;
 474         }
 475       }
 476     }
 477   }
 478   return false;
 479 }
 480 
 481 //------------------------------Ideal------------------------------------------
 482 // Return a node which is more "ideal" than the current node.  Must preserve
 483 // the CFG, but we can still strip out dead paths.
 484 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 485   if( !can_reshape && !in(0) ) return NULL;     // Already degraded to a Copy
 486   assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
 487 
 488   // Check for RegionNode with no Phi users and both inputs come from either
 489   // arm of the same IF.  If found, then the control-flow split is useless.
 490   bool has_phis = false;
 491   if (can_reshape) {            // Need DU info to check for Phi users
 492     has_phis = (has_phi() != NULL);       // Cache result
 493     if (has_phis && try_clean_mem_phi(phase)) {
 494       has_phis = false;
 495     }
 496 
 497     if (!has_phis) {            // No Phi users?  Nothing merging?
 498       for (uint i = 1; i < req()-1; i++) {
 499         Node *if1 = in(i);
 500         if( !if1 ) continue;
 501         Node *iff = if1->in(0);
 502         if( !iff || !iff->is_If() ) continue;
 503         for( uint j=i+1; j<req(); j++ ) {
 504           if( in(j) && in(j)->in(0) == iff &&
 505               if1->Opcode() != in(j)->Opcode() ) {
 506             // Add the IF Projections to the worklist. They (and the IF itself)
 507             // will be eliminated if dead.
 508             phase->is_IterGVN()->add_users_to_worklist(iff);
 509             set_req(i, iff->in(0));// Skip around the useless IF diamond
 510             set_req(j, NULL);
 511             return this;      // Record progress
 512           }
 513         }
 514       }

 884   if (iff1 == iff2) {
 885     igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
 886     igvn->replace_input_of(region, idx1, iff1->in(0));
 887     igvn->replace_input_of(region, idx2, igvn->C->top());
 888     return (region == this); // Remove useless if (both projections map to the same control/value)
 889   }
 890   BoolNode* bol1 = iff1->in(1)->isa_Bool();
 891   BoolNode* bol2 = iff2->in(1)->isa_Bool();
 892   if (bol1 == NULL || bol2 == NULL) {
 893     return false; // No bool inputs found
 894   }
 895   Node* cmp1 = bol1->in(1);
 896   Node* cmp2 = bol2->in(1);
 897   bool commute = false;
 898   if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
 899     return false; // No comparison
 900   } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
 901              cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
 902              cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
 903              cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
 904              cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck()) {
 905     // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
 906     // SubTypeCheck is not commutative
 907     return false;
 908   } else if (cmp1 != cmp2) {
 909     if (cmp1->in(1) == cmp2->in(2) &&
 910         cmp1->in(2) == cmp2->in(1)) {
 911       commute = true; // Same but swapped inputs, commute the test
 912     } else {
 913       return false; // Ifs are not comparing the same values
 914     }
 915   }
 916   proj1 = proj1->other_if_proj();
 917   proj2 = proj2->other_if_proj();
 918   if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
 919          proj2->unique_ctrl_out_or_null() == this) ||
 920         (proj2->unique_ctrl_out_or_null() == iff1 &&
 921          proj1->unique_ctrl_out_or_null() == this))) {
 922     return false; // Ifs are not connected through other projs
 923   }
 924   // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged

 965 
 966 //=============================================================================
 967 // note that these functions assume that the _adr_type field is flattened
 968 uint PhiNode::hash() const {
 969   const Type* at = _adr_type;
 970   return TypeNode::hash() + (at ? at->hash() : 0);
 971 }
 972 bool PhiNode::cmp( const Node &n ) const {
 973   return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
 974 }
 975 static inline
 976 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
 977   if (at == NULL || at == TypePtr::BOTTOM)  return at;
 978   return Compile::current()->alias_type(at)->adr_type();
 979 }
 980 
 981 //----------------------------make---------------------------------------------
 982 // create a new phi with edges matching r and set (initially) to x
 983 PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
 984   uint preds = r->req();   // Number of predecessor paths
 985   assert(t != Type::MEMORY || at == flatten_phi_adr_type(at), "flatten at");
 986   PhiNode* p = new PhiNode(r, t, at);
 987   for (uint j = 1; j < preds; j++) {
 988     // Fill in all inputs, except those which the region does not yet have
 989     if (r->in(j) != NULL)
 990       p->init_req(j, x);
 991   }
 992   return p;
 993 }
 994 PhiNode* PhiNode::make(Node* r, Node* x) {
 995   const Type*    t  = x->bottom_type();
 996   const TypePtr* at = NULL;
 997   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
 998   return make(r, x, t, at);
 999 }
1000 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
1001   const Type*    t  = x->bottom_type();
1002   const TypePtr* at = NULL;
1003   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
1004   return new PhiNode(r, t, at);
1005 }

1094       np->as_Phi()->verify_adr_type(visited, at);
1095     } else if (n->bottom_type() == Type::TOP
1096                || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
1097       // ignore top inputs
1098     } else {
1099       const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
1100       // recheck phi/non-phi consistency at leaves:
1101       assert((nat != NULL) == (at != NULL), "");
1102       assert(nat == at || nat == TypePtr::BOTTOM,
1103              "adr_type must be consistent at leaves of phi nest");
1104     }
1105   }
1106 }
1107 
1108 // Verify a whole nest of phis rooted at this one.
1109 void PhiNode::verify_adr_type(bool recursive) const {
1110   if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
1111   if (Node::in_dump())               return;  // muzzle asserts when printing
1112 
1113   assert((_type == Type::MEMORY) == (_adr_type != NULL), "adr_type for memory phis only");
1114 
1115   if (!VerifyAliases)       return;  // verify thoroughly only if requested
1116 
1117   assert(_adr_type == flatten_phi_adr_type(_adr_type),
1118          "Phi::adr_type must be pre-normalized");
1119 
1120   if (recursive) {
1121     VectorSet visited;
1122     verify_adr_type(visited, _adr_type);
1123   }
1124 }
1125 #endif
1126 
1127 
1128 //------------------------------Value------------------------------------------
1129 // Compute the type of the PhiNode
1130 const Type* PhiNode::Value(PhaseGVN* phase) const {
1131   Node *r = in(0);              // RegionNode
1132   if( !r )                      // Copy or dead
1133     return in(1) ? phase->type(in(1)) : Type::TOP;

1356 Node* PhiNode::Identity(PhaseGVN* phase) {
1357   // Check for no merging going on
1358   // (There used to be special-case code here when this->region->is_Loop.
 1359   // It would check for a tributary phi on the backedge that reduces to the
 1360   // main phi trivially, perhaps with a single cast.  The unique_input method
1361   // does all this and more, by reducing such tributaries to 'this'.)
1362   Node* uin = unique_input(phase, false);
1363   if (uin != NULL) {
1364     return uin;
1365   }
1366 
1367   int true_path = is_diamond_phi();
1368   // Delay CMove'ing identity if Ideal has not had the chance to handle unsafe cases, yet.
1369   if (true_path != 0 && !(phase->is_IterGVN() && wait_for_region_igvn(phase))) {
1370     Node* id = is_cmove_id(phase, true_path);
1371     if (id != NULL) {
1372       return id;
1373     }
1374   }
1375 
1376   // Looking for phis with identical inputs.  If we find one that has
1377   // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
1378   if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
1379       TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
1380     uint phi_len = req();
1381     Node* phi_reg = region();
1382     for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
1383       Node* u = phi_reg->fast_out(i);
1384       if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY &&
1385           u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg &&
1386           u->req() == phi_len) {
1387         for (uint j = 1; j < phi_len; j++) {
1388           if (in(j) != u->in(j)) {
1389             u = NULL;
1390             break;
1391           }
1392         }
1393         if (u != NULL) {
1394           return u;
1395         }

1878         } else if (rc->in(0)->in(1) != NULL &&
1879                    rc->in(0)->in(1)->is_Bool()) {
1880           if (worklist.member(rc->in(0)->in(1))) {
1881             delay = true;
1882           } else if (rc->in(0)->in(1)->in(1) != NULL &&
1883                      rc->in(0)->in(1)->in(1)->is_Cmp()) {
1884             if (worklist.member(rc->in(0)->in(1)->in(1))) {
1885               delay = true;
1886             }
1887           }
1888         }
1889       }
1890     }
1891   }
1892   if (delay) {
1893     worklist.push(this);
1894   }
1895   return delay;
1896 }
1897 
1898 //------------------------------Ideal------------------------------------------
1899 // Return a node which is more "ideal" than the current node.  Must preserve
1900 // the CFG, but we can still strip out dead paths.
1901 Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1902   Node *r = in(0);              // RegionNode
1903   assert(r != NULL && r->is_Region(), "this phi must have a region");
1904   assert(r->in(0) == NULL || !r->in(0)->is_Root(), "not a specially hidden merge");
1905 
1906   // Note: During parsing, phis are often transformed before their regions.
1907   // This means we have to use type_or_null to defend against untyped regions.
1908   if( phase->type_or_null(r) == Type::TOP ) // Dead code?
1909     return NULL;                // No change
1910 
1911   Node *top = phase->C->top();
1912   bool new_phi = (outcnt() == 0); // transforming new Phi
1913   // No change for igvn if new phi is not hooked
1914   if (new_phi && can_reshape)
1915     return NULL;
1916 
 1917   // There are 2 situations when only one valid phi input is left

2179           for (uint i = 1; i < req(); i++) {
2180             offset->init_req(i, in(i)->in(AddPNode::Offset));
2181           }
2182           phase->is_IterGVN()->register_new_node_with_optimizer(offset);
2183         }
2184         return new AddPNode(base, address, offset);
2185       }
2186     }
2187   }
2188 
2189   // Split phis through memory merges, so that the memory merges will go away.
2190   // Piggy-back this transformation on the search for a unique input....
2191   // It will be as if the merged memory is the unique value of the phi.
2192   // (Do not attempt this optimization unless parsing is complete.
2193   // It would make the parser's memory-merge logic sick.)
2194   // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
2195   if (progress == NULL && can_reshape && type() == Type::MEMORY) {
2196     // see if this phi should be sliced
2197     uint merge_width = 0;
2198     bool saw_self = false;
2199     for( uint i=1; i<req(); ++i ) {// For all paths in
2200       Node *ii = in(i);
2201       // TOP inputs should not be counted as safe inputs because if the
2202       // Phi references itself through all other inputs then splitting the
 2203   // Phi through memory merges would create a dead loop at a later stage.
2204       if (ii == top) {
2205         return NULL; // Delay optimization until graph is cleaned.
2206       }
2207       if (ii->is_MergeMem()) {
2208         MergeMemNode* n = ii->as_MergeMem();
2209         merge_width = MAX2(merge_width, n->req());
2210         saw_self = saw_self || (n->base_memory() == this);
2211       }
2212     }
2213 
2214     // This restriction is temporarily necessary to ensure termination:
2215     if (!saw_self && adr_type() == TypePtr::BOTTOM)  merge_width = 0;
2216 
2217     if (merge_width > Compile::AliasIdxRaw) {
2218       // found at least one non-empty MergeMem
2219       const TypePtr* at = adr_type();
2220       if (at != TypePtr::BOTTOM) {
2221         // Patch the existing phi to select an input from the merge:
2222         // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2223         //     Phi:AT1(...m1...)
2224         int alias_idx = phase->C->get_alias_index(at);
2225         for (uint i=1; i<req(); ++i) {
2226           Node *ii = in(i);
2227           if (ii->is_MergeMem()) {
2228             MergeMemNode* n = ii->as_MergeMem();
2229             // compress paths and change unreachable cycles to TOP
 2230   // Otherwise, we could keep updating the input infinitely along a MergeMem cycle
2231             // Equivalent code is in MemNode::Ideal_common
2232             Node *m  = phase->transform(n);
2233             if (outcnt() == 0) {  // Above transform() may kill us!
2234               return top;
2235             }

2266         if (!saw_safe_input) {
2267           // There is a dead loop: All inputs are either dead or reference back to this phi
2268           return top;
2269         }
2270 
2271         // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
2272         //     MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
2273         PhaseIterGVN* igvn = phase->is_IterGVN();
2274         assert(igvn != NULL, "sanity check");
2275         Node* hook = new Node(1);
2276         PhiNode* new_base = (PhiNode*) clone();
2277         // Must eagerly register phis, since they participate in loops.
2278         igvn->register_new_node_with_optimizer(new_base);
2279         hook->add_req(new_base);
2280 
2281         MergeMemNode* result = MergeMemNode::make(new_base);
2282         for (uint i = 1; i < req(); ++i) {
2283           Node *ii = in(i);
2284           if (ii->is_MergeMem()) {
2285             MergeMemNode* n = ii->as_MergeMem();
2286             for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
2287               // If we have not seen this slice yet, make a phi for it.
2288               bool made_new_phi = false;
2289               if (mms.is_empty()) {
2290                 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
2291                 made_new_phi = true;
2292                 igvn->register_new_node_with_optimizer(new_phi);
2293                 hook->add_req(new_phi);
2294                 mms.set_memory(new_phi);
2295               }
2296               Node* phi = mms.memory();
2297               assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
2298               phi->set_req(i, mms.memory2());
2299             }
2300           }
2301         }
2302         // Distribute all self-loops.
2303         { // (Extra braces to hide mms.)
2304           for (MergeMemStream mms(result); mms.next_non_empty(); ) {
2305             Node* phi = mms.memory();

2384             if (is_decodeN) {
2385               new_ii = new EncodePNode(ii, narrow_t);
2386             } else {
2387               new_ii = new EncodePKlassNode(ii, narrow_t);
2388             }
2389             igvn->register_new_node_with_optimizer(new_ii);
2390           }
2391         }
2392         new_phi->set_req(i, new_ii);
2393       }
2394       igvn->register_new_node_with_optimizer(new_phi, this);
2395       if (is_decodeN) {
2396         progress = new DecodeNNode(new_phi, bottom_type());
2397       } else {
2398         progress = new DecodeNKlassNode(new_phi, bottom_type());
2399       }
2400     }
2401   }
2402 #endif
2403 
2404   // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
2405   if (EnableVectorReboxing && can_reshape && progress == NULL && type()->isa_oopptr()) {
2406     progress = merge_through_phi(this, phase->is_IterGVN());
2407   }
2408 
2409   return progress;              // Return any progress
2410 }
2411 
2412 Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn) {
2413   Node_Stack stack(1);
2414   VectorSet  visited;
2415   Node_List  node_map;
2416 
2417   stack.push(root_phi, 1); // ignore control
2418   visited.set(root_phi->_idx);
2419 
2420   Node* new_phi = new PhiNode(root_phi->in(0), t);
2421   node_map.map(root_phi->_idx, new_phi);
2422 
2423   while (stack.is_nonempty()) {

2476         }
2477       } else if (in->Opcode() == Op_VectorBox) {
2478         VectorBoxNode* vbox = static_cast<VectorBoxNode*>(in);
2479         if (cached_vbox == NULL) {
2480           cached_vbox = vbox;
2481         } else if (vbox->vec_type() != cached_vbox->vec_type()) {
2482           // TODO: vector type mismatch can be handled with additional reinterpret casts
2483           assert(Type::cmp(vbox->vec_type(), cached_vbox->vec_type()) != 0, "inconsistent");
2484           return NULL; // not optimizable: vector type mismatch
2485         } else if (vbox->box_type() != cached_vbox->box_type()) {
2486           assert(Type::cmp(vbox->box_type(), cached_vbox->box_type()) != 0, "inconsistent");
2487           return NULL; // not optimizable: box type mismatch
2488         }
2489       } else {
2490         return NULL; // not optimizable: neither Phi nor VectorBox
2491       }
2492     } else {
2493       stack.pop();
2494     }
2495   }

2496   assert(cached_vbox != NULL, "sanity");
2497   const TypeInstPtr* btype = cached_vbox->box_type();
2498   const TypeVect*    vtype = cached_vbox->vec_type();
2499   Node* new_vbox_phi = clone_through_phi(root_phi, btype, VectorBoxNode::Box,   igvn);
2500   Node* new_vect_phi = clone_through_phi(root_phi, vtype, VectorBoxNode::Value, igvn);
2501   return new VectorBoxNode(igvn->C, new_vbox_phi, new_vect_phi, btype, vtype);
2502 }
2503 
2504 bool PhiNode::is_data_loop(RegionNode* r, Node* uin, const PhaseGVN* phase) {
2505   // First, take the short cut when we know it is a loop and the EntryControl data path is dead.
2506   // The loop node may only have one input because the entry path was removed in PhaseIdealLoop::Dominators().
2507   // Then, check if there is a data loop when the phi references itself directly or through other data nodes.
2508   assert(!r->is_Loop() || r->req() <= 3, "Loop node should have 3 or less inputs");
2509   const bool is_loop = (r->is_Loop() && r->req() == 3);
2510   const Node* top = phase->C->top();
2511   if (is_loop) {
2512     return !uin->eqv_uncast(in(LoopNode::EntryControl));
2513   } else {
2514     // We have a data loop either with an unsafe data reference or if a region is unreachable.
2515     return is_unsafe_data_reference(uin)

2720   return in(0)->in(0);
2721 }
2722 
2723 
2724 #ifndef PRODUCT
2725 void CatchProjNode::dump_spec(outputStream *st) const {
2726   ProjNode::dump_spec(st);
2727   st->print("@bci %d ",_handler_bci);
2728 }
2729 #endif
2730 
2731 //=============================================================================
2732 //------------------------------Identity---------------------------------------
2733 // Check for CreateEx being Identity.
2734 Node* CreateExNode::Identity(PhaseGVN* phase) {
2735   if( phase->type(in(1)) == Type::TOP ) return in(1);
2736   if( phase->type(in(0)) == Type::TOP ) return in(0);
2737   // We only come from CatchProj, unless the CatchProj goes away.
2738   // If the CatchProj is optimized away, then we just carry the
2739   // exception oop through.
2740   CallNode *call = in(1)->in(0)->as_Call();
2741 
2742   return (in(0)->is_CatchProj() && in(0)->in(0)->is_Catch() &&
2743           in(0)->in(0)->in(1) == in(1)) ? this : call->in(TypeFunc::Parms);
2744 }
2745 
2746 //=============================================================================
2747 //------------------------------Value------------------------------------------
2748 // Check for being unreachable.
2749 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
2750   if (!in(0) || in(0)->is_top()) return Type::TOP;
2751   return bottom_type();
2752 }
2753 
2754 //------------------------------Ideal------------------------------------------
2755 // Check for no longer being part of a loop
2756 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2757   if (can_reshape && !in(0)->is_Region()) {
2758     // Dead code elimination can sometimes delete this projection so
2759     // if it's not there, there's nothing to do.

src/hotspot/share/opto/cfgnode.cpp (new version)

   1 /*
   2  * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/objArrayKlass.hpp"
  31 #include "opto/addnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/connode.hpp"
  35 #include "opto/convertnode.hpp"
  36 #include "opto/inlinetypenode.hpp"
  37 #include "opto/loopnode.hpp"
  38 #include "opto/machnode.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/narrowptrnode.hpp"
  41 #include "opto/mulnode.hpp"
  42 #include "opto/phaseX.hpp"
  43 #include "opto/regalloc.hpp"
  44 #include "opto/regmask.hpp"
  45 #include "opto/runtime.hpp"
  46 #include "opto/subnode.hpp"
  47 #include "opto/vectornode.hpp"
  48 #include "utilities/vmError.hpp"
  49 
  50 // Portions of code courtesy of Clifford Click
  51 
  52 // Optimization - Graph Style
  53 
  54 //=============================================================================
  55 //------------------------------Value------------------------------------------
  56 // Compute the type of the RegionNode.

 419     } else if (n->is_NeverBranch()) {
 420       // Only follow the loop-internal projection, not the NeverBranch exit
 421       ProjNode* proj = n->as_NeverBranch()->proj_out_or_null(0);
 422       assert(proj != nullptr, "must find loop-internal projection of NeverBranch");
 423       worklist.push(proj);
 424     } else {
 425       // Traverse all CFG outputs
 426       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 427         Node* use = n->fast_out(i);
 428         if (use->is_CFG()) {
 429           worklist.push(use);
 430         }
 431       }
 432     }
 433   }
 434   // No exit found for any loop -> all are infinite
 435   return true;
 436 }
 437 #endif //ASSERT
 438 
 439 Node* PhiNode::try_clean_mem_phi(PhaseGVN *phase) {
 440   // Incremental inlining + PhaseStringOpts sometimes produce:
 441   //
 442   // cmpP with 1 top input
 443   //           |
 444   //          If
 445   //         /  \
 446   //   IfFalse  IfTrue  /- Some Node
 447   //         \  /      /    /
 448   //        Region    / /-MergeMem
 449   //             \---Phi
 450   //
 451   //
 452   // It's expected by PhaseStringOpts that the Region goes away and is
 453   // replaced by the If's control input, but because there's still a Phi,
 454   // the Region stays in the graph. The top input from the cmpP is
 455   // propagated forward and a useful subgraph goes away. The code below
 456   // replaces the Phi with the MergeMem so that the Region is
 457   // simplified.
 458 
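       // An illustrative sketch of the rewrite (hypothetical node names, not
       // taken from a real graph): for a diamond whose unique memory phi is
       //
       //   phi = Phi(region, mem, MergeMem(base_memory: mem, ...))
       //
       // the MergeMem is returned and the caller replaces the phi with it; with
       // the phi gone, the Region merges nothing and can collapse.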
 459   if (type() == Type::MEMORY && is_diamond_phi(true)) {
 460     MergeMemNode* m = NULL;
 461     assert(req() == 3, "same as region");
 462     Node* r = in(0);
 463     for (uint i = 1; i < 3; ++i) {
 464       Node *mem = in(i);
 465       if (mem && mem->is_MergeMem() && r->in(i)->outcnt() == 1) {
 466         // Nothing is control-dependent on path #i except the region itself.
 467         m = mem->as_MergeMem();
 468         uint j = 3 - i;
 469         Node* other = in(j);
 470         if (other && other == m->base_memory()) {
 471           // m is a successor memory to other, and is not pinned inside the diamond, so push it out.
 472           // This will allow the diamond to collapse completely.
 473           return m;
 474         }
 475       }
 476     }
 477   }
 478   return NULL;
 479 }
 480 
 481 //------------------------------Ideal------------------------------------------
 482 // Return a node which is more "ideal" than the current node.  Must preserve
 483 // the CFG, but we can still strip out dead paths.
 484 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 485   if( !can_reshape && !in(0) ) return NULL;     // Already degraded to a Copy
 486   assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
 487 
 488   // Check for RegionNode with no Phi users and both inputs come from either
 489   // arm of the same IF.  If found, then the control-flow split is useless.
 490   bool has_phis = false;
 491   if (can_reshape) {            // Need DU info to check for Phi users
 492     has_phis = (has_phi() != NULL);       // Cache result
 493     if (has_phis) {
 494       PhiNode* phi = has_unique_phi();
 495       if (phi != NULL) {
 496         Node* m = phi->try_clean_mem_phi(phase);
 497         if (m != NULL) {
 498           phase->is_IterGVN()->replace_node(phi, m);
 499           has_phis = false;
 500         }
 501       }
 502     }
 503 
 504     if (!has_phis) {            // No Phi users?  Nothing merging?
 505       for (uint i = 1; i < req()-1; i++) {
 506         Node *if1 = in(i);
 507         if( !if1 ) continue;
 508         Node *iff = if1->in(0);
 509         if( !iff || !iff->is_If() ) continue;
 510         for( uint j=i+1; j<req(); j++ ) {
 511           if( in(j) && in(j)->in(0) == iff &&
 512               if1->Opcode() != in(j)->Opcode() ) {
 513             // Add the IF Projections to the worklist. They (and the IF itself)
 514             // will be eliminated if dead.
 515             phase->is_IterGVN()->add_users_to_worklist(iff);
 516             set_req(i, iff->in(0));// Skip around the useless IF diamond
 517             set_req(j, NULL);
 518             return this;      // Record progress
 519           }
 520         }
 521       }

 891   if (iff1 == iff2) {
 892     igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
 893     igvn->replace_input_of(region, idx1, iff1->in(0));
 894     igvn->replace_input_of(region, idx2, igvn->C->top());
 895     return (region == this); // Remove useless if (both projections map to the same control/value)
 896   }
 897   BoolNode* bol1 = iff1->in(1)->isa_Bool();
 898   BoolNode* bol2 = iff2->in(1)->isa_Bool();
 899   if (bol1 == NULL || bol2 == NULL) {
 900     return false; // No bool inputs found
 901   }
 902   Node* cmp1 = bol1->in(1);
 903   Node* cmp2 = bol2->in(1);
 904   bool commute = false;
 905   if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
 906     return false; // No comparison
 907   } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
 908              cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
 909              cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
 910              cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
 911              cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck() ||
 912              cmp1->is_FlatArrayCheck() || cmp2->is_FlatArrayCheck()) {
 913     // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
 914     // SubTypeCheck and FlatArrayCheck are not commutative
 915     return false;
 916   } else if (cmp1 != cmp2) {
 917     if (cmp1->in(1) == cmp2->in(2) &&
 918         cmp1->in(2) == cmp2->in(1)) {
 919       commute = true; // Same but swapped inputs, commute the test
 920     } else {
 921       return false; // Ifs are not comparing the same values
 922     }
 923   }
 924   proj1 = proj1->other_if_proj();
 925   proj2 = proj2->other_if_proj();
 926   if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
 927          proj2->unique_ctrl_out_or_null() == this) ||
 928         (proj2->unique_ctrl_out_or_null() == iff1 &&
 929          proj1->unique_ctrl_out_or_null() == this))) {
 930     return false; // Ifs are not connected through other projs
 931   }
 932   // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged

 973 
 974 //=============================================================================
 975 // note that these functions assume that the _adr_type field is flattened
 976 uint PhiNode::hash() const {
 977   const Type* at = _adr_type;
 978   return TypeNode::hash() + (at ? at->hash() : 0);
 979 }
 980 bool PhiNode::cmp( const Node &n ) const {
 981   return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
 982 }
 983 static inline
 984 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
 985   if (at == NULL || at == TypePtr::BOTTOM)  return at;
 986   return Compile::current()->alias_type(at)->adr_type();
 987 }
 988 
 989 //----------------------------make---------------------------------------------
 990 // create a new phi with edges matching r and set (initially) to x
 991 PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
 992   uint preds = r->req();   // Number of predecessor paths
 993   assert(t != Type::MEMORY || at == flatten_phi_adr_type(at) || (flatten_phi_adr_type(at) == TypeAryPtr::INLINES && Compile::current()->flattened_accesses_share_alias()), "flatten at");
 994   PhiNode* p = new PhiNode(r, t, at);
 995   for (uint j = 1; j < preds; j++) {
 996     // Fill in all inputs, except those which the region does not yet have
 997     if (r->in(j) != NULL)
 998       p->init_req(j, x);
 999   }
1000   return p;
1001 }
1002 PhiNode* PhiNode::make(Node* r, Node* x) {
1003   const Type*    t  = x->bottom_type();
1004   const TypePtr* at = NULL;
1005   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
1006   return make(r, x, t, at);
1007 }
1008 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
1009   const Type*    t  = x->bottom_type();
1010   const TypePtr* at = NULL;
1011   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
1012   return new PhiNode(r, t, at);
1013 }
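
       // A usage sketch (assumed caller context; 'r' is a fully wired Region and
       // 'mem'/'val' are live nodes -- the names here are illustrative only):
       //
       //   PhiNode* mphi = PhiNode::make(r, mem);       // memory phi: adr_type is
       //                                                // mem->adr_type(), flattened
       //   PhiNode* dphi = PhiNode::make(r, val);       // data phi: adr_type == NULL
       //   PhiNode* b    = PhiNode::make_blank(r, val); // inputs left NULL for now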

1102       np->as_Phi()->verify_adr_type(visited, at);
1103     } else if (n->bottom_type() == Type::TOP
1104                || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
1105       // ignore top inputs
1106     } else {
1107       const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
1108       // recheck phi/non-phi consistency at leaves:
1109       assert((nat != NULL) == (at != NULL), "");
1110       assert(nat == at || nat == TypePtr::BOTTOM,
1111              "adr_type must be consistent at leaves of phi nest");
1112     }
1113   }
1114 }
1115 
1116 // Verify a whole nest of phis rooted at this one.
1117 void PhiNode::verify_adr_type(bool recursive) const {
1118   if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
1119   if (Node::in_dump())               return;  // muzzle asserts when printing
1120 
1121   assert((_type == Type::MEMORY) == (_adr_type != NULL), "adr_type for memory phis only");
 1122   // Flat array elements shouldn't get their own memory slice until flattened_accesses_share_alias is cleared.
 1123   // It could be that the graph has no loads/stores and flattened_accesses_share_alias is never cleared. EA could
 1124   // still create per-element Phis but that wouldn't be a problem as there are no memory accesses for that array.
1125   assert(_adr_type == NULL || _adr_type->isa_aryptr() == NULL ||
1126          _adr_type->is_aryptr()->is_known_instance() ||
1127          !_adr_type->is_aryptr()->is_flat() ||
1128          !Compile::current()->flattened_accesses_share_alias() ||
1129          _adr_type == TypeAryPtr::INLINES, "flat array element shouldn't get its own slice yet");
1130 
1131   if (!VerifyAliases)       return;  // verify thoroughly only if requested
1132 
1133   assert(_adr_type == flatten_phi_adr_type(_adr_type),
1134          "Phi::adr_type must be pre-normalized");
1135 
1136   if (recursive) {
1137     VectorSet visited;
1138     verify_adr_type(visited, _adr_type);
1139   }
1140 }
1141 #endif
1142 
1143 
1144 //------------------------------Value------------------------------------------
1145 // Compute the type of the PhiNode
1146 const Type* PhiNode::Value(PhaseGVN* phase) const {
1147   Node *r = in(0);              // RegionNode
1148   if( !r )                      // Copy or dead
1149     return in(1) ? phase->type(in(1)) : Type::TOP;

1372 Node* PhiNode::Identity(PhaseGVN* phase) {
1373   // Check for no merging going on
1374   // (There used to be special-case code here when this->region->is_Loop.
 1375   // It would check for a tributary phi on the backedge that reduces to the
 1376   // main phi trivially, perhaps with a single cast.  The unique_input method
1377   // does all this and more, by reducing such tributaries to 'this'.)
1378   Node* uin = unique_input(phase, false);
1379   if (uin != NULL) {
1380     return uin;
1381   }
1382 
1383   int true_path = is_diamond_phi();
1384   // Delay CMove'ing identity if Ideal has not had the chance to handle unsafe cases, yet.
1385   if (true_path != 0 && !(phase->is_IterGVN() && wait_for_region_igvn(phase))) {
1386     Node* id = is_cmove_id(phase, true_path);
1387     if (id != NULL) {
1388       return id;
1389     }
1390   }
1391 
1392   if (phase->is_IterGVN()) {
1393     Node* m = try_clean_mem_phi(phase);
1394     if (m != NULL) {
1395       return m;
1396     }
1397   }
1398 
1399 
1400   // Looking for phis with identical inputs.  If we find one that has
1401   // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
1402   if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
1403       TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
1404     uint phi_len = req();
1405     Node* phi_reg = region();
1406     for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
1407       Node* u = phi_reg->fast_out(i);
1408       if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY &&
1409           u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg &&
1410           u->req() == phi_len) {
1411         for (uint j = 1; j < phi_len; j++) {
1412           if (in(j) != u->in(j)) {
1413             u = NULL;
1414             break;
1415           }
1416         }
1417         if (u != NULL) {
1418           return u;
1419         }

1902         } else if (rc->in(0)->in(1) != NULL &&
1903                    rc->in(0)->in(1)->is_Bool()) {
1904           if (worklist.member(rc->in(0)->in(1))) {
1905             delay = true;
1906           } else if (rc->in(0)->in(1)->in(1) != NULL &&
1907                      rc->in(0)->in(1)->in(1)->is_Cmp()) {
1908             if (worklist.member(rc->in(0)->in(1)->in(1))) {
1909               delay = true;
1910             }
1911           }
1912         }
1913       }
1914     }
1915   }
1916   if (delay) {
1917     worklist.push(this);
1918   }
1919   return delay;
1920 }
1921 
1922 // Push inline type input nodes (and null) down through the phi recursively (can handle data loops).
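       // A sketch (hypothetical inline klass with two fields x and y; all names
       // are illustrative, not from this file):
       //
       //   Phi(InlineType(oop1, x1, y1), InlineType(oop2, x2, y2))
       //
       // becomes a single InlineTypeNode whose oop and fields are merged by phis:
       //
       //   InlineType(Phi(oop1, oop2), Phi(x1, x2), Phi(y1, y2))
       //
       // so a later field load from the merged value can fold to a field phi.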
1923 InlineTypeNode* PhiNode::push_inline_types_through(PhaseGVN* phase, bool can_reshape, ciInlineKlass* vk, bool is_init) {
1924   InlineTypeNode* vt = InlineTypeNode::make_null(*phase, vk)->clone_with_phis(phase, in(0), is_init);
1925   if (can_reshape) {
1926     // Replace phi right away to be able to use the inline
1927     // type node when reaching the phi again through data loops.
1928     PhaseIterGVN* igvn = phase->is_IterGVN();
1929     for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1930       Node* u = fast_out(i);
1931       igvn->rehash_node_delayed(u);
1932       imax -= u->replace_edge(this, vt);
1933       --i;
1934     }
1935     igvn->rehash_node_delayed(this);
1936     assert(outcnt() == 0, "should be dead now");
1937   }
1938   ResourceMark rm;
1939   Node_List casts;
1940   for (uint i = 1; i < req(); ++i) {
1941     Node* n = in(i);
1942     while (n->is_ConstraintCast()) {
1943       casts.push(n);
1944       n = n->in(1);
1945     }
1946     if (phase->type(n)->is_zero_type()) {
1947       n = InlineTypeNode::make_null(*phase, vk);
1948     } else if (n->is_Phi()) {
1949       assert(can_reshape, "can only handle phis during IGVN");
1950       n = phase->transform(n->as_Phi()->push_inline_types_through(phase, can_reshape, vk, is_init));
1951     }
1952     while (casts.size() != 0) {
1953       // Push the cast(s) through the InlineTypeNode
1954       Node* cast = casts.pop()->clone();
1955       cast->set_req_X(1, n->as_InlineType()->get_oop(), phase);
1956       n = n->clone();
1957       n->as_InlineType()->set_oop(phase->transform(cast));
1958       n = phase->transform(n);
1959     }
1960     bool transform = !can_reshape && (i == (req()-1)); // Transform phis on last merge
1961     vt->merge_with(phase, n->as_InlineType(), i, transform);
1962   }
1963   return vt;
1964 }
1965 
1966 //------------------------------Ideal------------------------------------------
1967 // Return a node which is more "ideal" than the current node.  Must preserve
1968 // the CFG, but we can still strip out dead paths.
1969 Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1970   Node *r = in(0);              // RegionNode
1971   assert(r != NULL && r->is_Region(), "this phi must have a region");
1972   assert(r->in(0) == NULL || !r->in(0)->is_Root(), "not a specially hidden merge");
1973 
1974   // Note: During parsing, phis are often transformed before their regions.
1975   // This means we have to use type_or_null to defend against untyped regions.
1976   if( phase->type_or_null(r) == Type::TOP ) // Dead code?
1977     return NULL;                // No change
1978 
1979   Node *top = phase->C->top();
1980   bool new_phi = (outcnt() == 0); // transforming new Phi
1981   // No change for igvn if new phi is not hooked
1982   if (new_phi && can_reshape)
1983     return NULL;
1984 
 1985   // There are 2 situations when only one valid phi input is left

2247           for (uint i = 1; i < req(); i++) {
2248             offset->init_req(i, in(i)->in(AddPNode::Offset));
2249           }
2250           phase->is_IterGVN()->register_new_node_with_optimizer(offset);
2251         }
2252         return new AddPNode(base, address, offset);
2253       }
2254     }
2255   }
2256 
2257   // Split phis through memory merges, so that the memory merges will go away.
2258   // Piggy-back this transformation on the search for a unique input....
2259   // It will be as if the merged memory is the unique value of the phi.
2260   // (Do not attempt this optimization unless parsing is complete.
2261   // It would make the parser's memory-merge logic sick.)
2262   // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
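       // For example (a sketch; the shapes are hypothetical): a loop whose body
       // stores to a single field sees Phi(entry_mem, MergeMem(...)) on the
       // backedge; slicing replaces the wide phi by one narrow phi per touched
       // alias class, so untouched memory slices flow around the loop unchanged.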
2263   if (progress == NULL && can_reshape && type() == Type::MEMORY) {
2264     // see if this phi should be sliced
2265     uint merge_width = 0;
2266     bool saw_self = false;
2267     // TODO revisit this with JDK-8247216
2268     bool mergemem_only = true;
2269     for( uint i=1; i<req(); ++i ) {// For all paths in
2270       Node *ii = in(i);
2271       // TOP inputs should not be counted as safe inputs because if the
2272       // Phi references itself through all other inputs then splitting the
 2273   // Phi through memory merges would create a dead loop at a later stage.
2274       if (ii == top) {
2275         return NULL; // Delay optimization until graph is cleaned.
2276       }
2277       if (ii->is_MergeMem()) {
2278         MergeMemNode* n = ii->as_MergeMem();
2279         merge_width = MAX2(merge_width, n->req());
2280         saw_self = saw_self || (n->base_memory() == this);
2281       } else {
2282         mergemem_only = false;
2283       }
2284     }
2285 
2286     // This restriction is temporarily necessary to ensure termination:
2287     if (!mergemem_only && !saw_self && adr_type() == TypePtr::BOTTOM)  merge_width = 0;
2288 
2289     if (merge_width > Compile::AliasIdxRaw) {
2290       // found at least one non-empty MergeMem
2291       const TypePtr* at = adr_type();
2292       if (at != TypePtr::BOTTOM) {
2293         // Patch the existing phi to select an input from the merge:
2294         // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2295         //     Phi:AT1(...m1...)
2296         int alias_idx = phase->C->get_alias_index(at);
2297         for (uint i=1; i<req(); ++i) {
2298           Node *ii = in(i);
2299           if (ii->is_MergeMem()) {
2300             MergeMemNode* n = ii->as_MergeMem();
2301             // compress paths and change unreachable cycles to TOP
 2302   // Otherwise, we could keep updating the input infinitely along a MergeMem cycle
2303             // Equivalent code is in MemNode::Ideal_common
2304             Node *m  = phase->transform(n);
2305             if (outcnt() == 0) {  // Above transform() may kill us!
2306               return top;
2307             }

2338         if (!saw_safe_input) {
2339           // There is a dead loop: All inputs are either dead or reference back to this phi
2340           return top;
2341         }
2342 
2343         // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
2344         //     MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
2345         PhaseIterGVN* igvn = phase->is_IterGVN();
2346         assert(igvn != NULL, "sanity check");
2347         Node* hook = new Node(1);
2348         PhiNode* new_base = (PhiNode*) clone();
2349         // Must eagerly register phis, since they participate in loops.
2350         igvn->register_new_node_with_optimizer(new_base);
2351         hook->add_req(new_base);
2352 
2353         MergeMemNode* result = MergeMemNode::make(new_base);
2354         for (uint i = 1; i < req(); ++i) {
2355           Node *ii = in(i);
2356           if (ii->is_MergeMem()) {
2357             MergeMemNode* n = ii->as_MergeMem();
2358             if (igvn) {
2359               // TODO revisit this with JDK-8247216
2360               // Put 'n' on the worklist because it might be modified by MergeMemStream::iteration_setup
2361               igvn->_worklist.push(n);
2362             }
2363             for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
2364               // If we have not seen this slice yet, make a phi for it.
2365               bool made_new_phi = false;
2366               if (mms.is_empty()) {
2367                 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
2368                 made_new_phi = true;
2369                 igvn->register_new_node_with_optimizer(new_phi);
2370                 hook->add_req(new_phi);
2371                 mms.set_memory(new_phi);
2372               }
2373               Node* phi = mms.memory();
2374               assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
2375               phi->set_req(i, mms.memory2());
2376             }
2377           }
2378         }
2379         // Distribute all self-loops.
2380         { // (Extra braces to hide mms.)
2381           for (MergeMemStream mms(result); mms.next_non_empty(); ) {
2382             Node* phi = mms.memory();

2461             if (is_decodeN) {
2462               new_ii = new EncodePNode(ii, narrow_t);
2463             } else {
2464               new_ii = new EncodePKlassNode(ii, narrow_t);
2465             }
2466             igvn->register_new_node_with_optimizer(new_ii);
2467           }
2468         }
2469         new_phi->set_req(i, new_ii);
2470       }
2471       igvn->register_new_node_with_optimizer(new_phi, this);
2472       if (is_decodeN) {
2473         progress = new DecodeNNode(new_phi, bottom_type());
2474       } else {
2475         progress = new DecodeNKlassNode(new_phi, bottom_type());
2476       }
2477     }
2478   }
2479 #endif
2480 
2481   // Check recursively if inputs are either an inline type, constant null
2482   // or another Phi (including self references through data loops). If so,
2483   // push the inline types down through the phis to enable folding of loads.
2484   if (EnableValhalla && _type->isa_ptr() && req() > 2) {
2485     ResourceMark rm;
2486     Unique_Node_List worklist;
2487     worklist.push(this);
2488     bool can_optimize = true;
2489     ciInlineKlass* vk = NULL;
2490     // true if all IsInit inputs of all InlineType* nodes are true
2491     bool is_init = true;
2492     Node_List casts;
2493 
2494     // TODO 8302217 We need to prevent endless pushing through
2495     bool only_phi = (outcnt() != 0);
2496     for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
2497       Node* n = fast_out(i);
2498       if (n->is_InlineType() && n->in(1) == this) {
2499         can_optimize = false;
2500         break;
2501       }
2502       if (!n->is_Phi()) {
2503         only_phi = false;
2504       }
2505     }
2506     if (only_phi) {
2507       can_optimize = false;
2508     }
2509     for (uint next = 0; next < worklist.size() && can_optimize; next++) {
2510       Node* phi = worklist.at(next);
2511       for (uint i = 1; i < phi->req() && can_optimize; i++) {
2512         Node* n = phi->in(i);
2513         if (n == NULL) {
2514           can_optimize = false;
2515           break;
2516         }
2517         while (n->is_ConstraintCast()) {
2518           if (n->in(0) != NULL && n->in(0)->is_top()) {
2519             // Will die, don't optimize
2520             can_optimize = false;
2521             break;
2522           }
2523           casts.push(n);
2524           n = n->in(1);
2525         }
2526         const Type* t = phase->type(n);
2527         if (n->is_InlineType() && (vk == NULL || vk == t->inline_klass())) {
2528           vk = (vk == NULL) ? t->inline_klass() : vk;
2529           if (phase->find_int_con(n->as_InlineType()->get_is_init(), 0) != 1) {
2530             is_init = false;
2531           }
2532         } else if (n->is_Phi() && can_reshape && n->bottom_type()->isa_ptr()) {
2533           worklist.push(n);
2534         } else if (t->is_zero_type()) {
2535           is_init = false;
2536         } else {
2537           can_optimize = false;
2538         }
2539       }
2540     }
2541     // Check if cast nodes can be pushed through
2542     const Type* t = Type::get_const_type(vk);
2543     while (casts.size() != 0 && can_optimize && t != NULL) {
2544       Node* cast = casts.pop();
2545       if (t->filter(cast->bottom_type()) == Type::TOP) {
2546         can_optimize = false;
2547       }
2548     }
2549     if (can_optimize && vk != NULL) {
2550 // TODO 8302217
2551 //      assert(!_type->isa_ptr() || _type->maybe_null() || is_init, "Phi not null but a possible null was seen");
2552       return push_inline_types_through(phase, can_reshape, vk, is_init);
2553     }
2554   }
2555 
2556   // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
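       // Expanded (a sketch; box/vec names are illustrative):
       //   Phi(VectorBox(box1, vec1), VectorBox(box2, vec2))
       // becomes
       //   VectorBox(Phi(box1, box2), Phi(vec1, vec2))
       // merging the box oops and the raw vector values through separate phis
       // (see merge_through_phi below), which can let the boxing be elided.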
2557   if (EnableVectorReboxing && can_reshape && progress == NULL && type()->isa_oopptr()) {
2558     progress = merge_through_phi(this, phase->is_IterGVN());
2559   }
2560 
2561   return progress;              // Return any progress
2562 }
2563 
2564 Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn) {
2565   Node_Stack stack(1);
2566   VectorSet  visited;
2567   Node_List  node_map;
2568 
2569   stack.push(root_phi, 1); // ignore control
2570   visited.set(root_phi->_idx);
2571 
2572   Node* new_phi = new PhiNode(root_phi->in(0), t);
2573   node_map.map(root_phi->_idx, new_phi);
2574 
2575   while (stack.is_nonempty()) {

2628         }
2629       } else if (in->Opcode() == Op_VectorBox) {
2630         VectorBoxNode* vbox = static_cast<VectorBoxNode*>(in);
2631         if (cached_vbox == NULL) {
2632           cached_vbox = vbox;
2633         } else if (vbox->vec_type() != cached_vbox->vec_type()) {
2634           // TODO: vector type mismatch can be handled with additional reinterpret casts
2635           assert(Type::cmp(vbox->vec_type(), cached_vbox->vec_type()) != 0, "inconsistent");
2636           return NULL; // not optimizable: vector type mismatch
2637         } else if (vbox->box_type() != cached_vbox->box_type()) {
2638           assert(Type::cmp(vbox->box_type(), cached_vbox->box_type()) != 0, "inconsistent");
2639           return NULL; // not optimizable: box type mismatch
2640         }
2641       } else {
2642         return NULL; // not optimizable: neither Phi nor VectorBox
2643       }
2644     } else {
2645       stack.pop();
2646     }
2647   }
2648 
2649   assert(cached_vbox != NULL, "sanity");
2650   const TypeInstPtr* btype = cached_vbox->box_type();
2651   const TypeVect*    vtype = cached_vbox->vec_type();
2652   Node* new_vbox_phi = clone_through_phi(root_phi, btype, VectorBoxNode::Box,   igvn);
2653   Node* new_vect_phi = clone_through_phi(root_phi, vtype, VectorBoxNode::Value, igvn);
2654   return new VectorBoxNode(igvn->C, new_vbox_phi, new_vect_phi, btype, vtype);
2655 }
2656 
2657 bool PhiNode::is_data_loop(RegionNode* r, Node* uin, const PhaseGVN* phase) {
2658   // First, take the short cut when we know it is a loop and the EntryControl data path is dead.
2659   // The loop node may only have one input because the entry path was removed in PhaseIdealLoop::Dominators().
2660   // Then, check if there is a data loop when the phi references itself directly or through other data nodes.
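       // Sketch: on a Loop, Phi(entry_val, f(phi)) is the ordinary induction
       // shape and is fine; a data loop exists when 'uin' reaches back to this
       // phi without passing through a live entry value (e.g. the entry path died).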
2661   assert(!r->is_Loop() || r->req() <= 3, "Loop node should have 3 or less inputs");
2662   const bool is_loop = (r->is_Loop() && r->req() == 3);
2663   const Node* top = phase->C->top();
2664   if (is_loop) {
2665     return !uin->eqv_uncast(in(LoopNode::EntryControl));
2666   } else {
2667     // We have a data loop either with an unsafe data reference or if a region is unreachable.
2668     return is_unsafe_data_reference(uin)

2873   return in(0)->in(0);
2874 }
2875 
2876 
2877 #ifndef PRODUCT
2878 void CatchProjNode::dump_spec(outputStream *st) const {
2879   ProjNode::dump_spec(st);
2880   st->print("@bci %d ",_handler_bci);
2881 }
2882 #endif
2883 
2884 //=============================================================================
2885 //------------------------------Identity---------------------------------------
2886 // Check for CreateEx being Identity.
2887 Node* CreateExNode::Identity(PhaseGVN* phase) {
2888   if( phase->type(in(1)) == Type::TOP ) return in(1);
2889   if( phase->type(in(0)) == Type::TOP ) return in(0);
2890   // We only come from CatchProj, unless the CatchProj goes away.
2891   // If the CatchProj is optimized away, then we just carry the
2892   // exception oop through.
2893 
2894   // CheckCastPPNode::Ideal() for inline types reuses the exception
2895   // paths of a call to perform an allocation: we can see a Phi here.
2896   if (in(1)->is_Phi()) {
2897     return this;
2898   }
2899   CallNode *call = in(1)->in(0)->as_Call();
2900 
2901   return (in(0)->is_CatchProj() && in(0)->in(0)->is_Catch() &&
2902           in(0)->in(0)->in(1) == in(1)) ? this : call->in(TypeFunc::Parms);
2903 }
2904 
2905 //=============================================================================
2906 //------------------------------Value------------------------------------------
2907 // Check for being unreachable.
2908 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
2909   if (!in(0) || in(0)->is_top()) return Type::TOP;
2910   return bottom_type();
2911 }
2912 
2913 //------------------------------Ideal------------------------------------------
2914 // Check for no longer being part of a loop
2915 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2916   if (can_reshape && !in(0)->is_Region()) {
2917     // Dead code elimination can sometimes delete this projection so
2918     // if it's not there, there's nothing to do.