< prev index next >

src/hotspot/share/opto/cfgnode.cpp

Print this page

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/objArrayKlass.hpp"
  31 #include "opto/addnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/connode.hpp"
  35 #include "opto/convertnode.hpp"

  36 #include "opto/loopnode.hpp"
  37 #include "opto/machnode.hpp"
  38 #include "opto/movenode.hpp"
  39 #include "opto/narrowptrnode.hpp"
  40 #include "opto/mulnode.hpp"
  41 #include "opto/phaseX.hpp"
  42 #include "opto/regalloc.hpp"
  43 #include "opto/regmask.hpp"
  44 #include "opto/runtime.hpp"
  45 #include "opto/subnode.hpp"
  46 #include "opto/vectornode.hpp"
  47 #include "utilities/vmError.hpp"
  48 
  49 // Portions of code courtesy of Clifford Click
  50 
  51 // Optimization - Graph Style
  52 
  53 //=============================================================================
  54 //------------------------------Value------------------------------------------
  55 // Compute the type of the RegionNode.

 501   if (left_path == nullptr || right_path == nullptr) {
 502     return false;
 503   }
 504   Node* diamond_if = left_path->in(0);
 505   if (diamond_if == nullptr || !diamond_if->is_If() || diamond_if != right_path->in(0)) {
 506     // Not an IfNode merging a diamond or TOP.
 507     return false;
 508   }
 509 
 510   // Check for a proper bool/cmp
 511   const Node* bol = diamond_if->in(1);
 512   if (!bol->is_Bool()) {
 513     return false;
 514   }
 515   const Node* cmp = bol->in(1);
 516   if (!cmp->is_Cmp()) {
 517     return false;
 518   }
 519   return true;
 520 }

 521 //------------------------------Ideal------------------------------------------
 522 // Return a node which is more "ideal" than the current node.  Must preserve
 523 // the CFG, but we can still strip out dead paths.
 524 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 525   if( !can_reshape && !in(0) ) return nullptr;     // Already degraded to a Copy
 526   assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
 527 
 528   // Check for RegionNode with no Phi users and both inputs come from either
 529   // arm of the same IF.  If found, then the control-flow split is useless.
 530   bool has_phis = false;
 531   if (can_reshape) {            // Need DU info to check for Phi users
 532     try_clean_mem_phis(phase->is_IterGVN());
 533     has_phis = (has_phi() != nullptr);       // Cache result
 534 
 535     if (!has_phis) {            // No Phi users?  Nothing merging?
 536       for (uint i = 1; i < req()-1; i++) {
 537         Node *if1 = in(i);
 538         if( !if1 ) continue;
 539         Node *iff = if1->in(0);
 540         if( !iff || !iff->is_If() ) continue;

 947   if (iff1 == iff2) {
 948     igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
 949     igvn->replace_input_of(region, idx1, iff1->in(0));
 950     igvn->replace_input_of(region, idx2, igvn->C->top());
 951     return (region == this); // Remove useless if (both projections map to the same control/value)
 952   }
 953   BoolNode* bol1 = iff1->in(1)->isa_Bool();
 954   BoolNode* bol2 = iff2->in(1)->isa_Bool();
 955   if (bol1 == nullptr || bol2 == nullptr) {
 956     return false; // No bool inputs found
 957   }
 958   Node* cmp1 = bol1->in(1);
 959   Node* cmp2 = bol2->in(1);
 960   bool commute = false;
 961   if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
 962     return false; // No comparison
 963   } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
 964              cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
 965              cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
 966              cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
 967              cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck()) {

 968     // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
 969     // SubTypeCheck is not commutative
 970     return false;
 971   } else if (cmp1 != cmp2) {
 972     if (cmp1->in(1) == cmp2->in(2) &&
 973         cmp1->in(2) == cmp2->in(1)) {
 974       commute = true; // Same but swapped inputs, commute the test
 975     } else {
 976       return false; // Ifs are not comparing the same values
 977     }
 978   }
 979   proj1 = proj1->other_if_proj();
 980   proj2 = proj2->other_if_proj();
 981   if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
 982          proj2->unique_ctrl_out_or_null() == this) ||
 983         (proj2->unique_ctrl_out_or_null() == iff1 &&
 984          proj1->unique_ctrl_out_or_null() == this))) {
 985     return false; // Ifs are not connected through other projs
 986   }
 987   // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged

1026     st->print("#reducible ");
1027     break;
1028   case RegionNode::LoopStatus::NeverIrreducibleEntry:
1029     break; // nothing
1030   }
1031 }
1032 #endif
1033 
1034 // Find the one non-null required input.  RegionNode only
1035 Node *Node::nonnull_req() const {
1036   assert( is_Region(), "" );
1037   for( uint i = 1; i < _cnt; i++ )
1038     if( in(i) )
1039       return in(i);
1040   ShouldNotReachHere();
1041   return nullptr;
1042 }
1043 
1044 
1045 //=============================================================================
1046 // note that these functions assume that the _adr_type field is flattened
1047 uint PhiNode::hash() const {
1048   const Type* at = _adr_type;
1049   return TypeNode::hash() + (at ? at->hash() : 0);
1050 }
1051 bool PhiNode::cmp( const Node &n ) const {
1052   return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
1053 }
1054 static inline
1055 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
1056   if (at == nullptr || at == TypePtr::BOTTOM)  return at;
1057   return Compile::current()->alias_type(at)->adr_type();
1058 }
1059 
1060 //----------------------------make---------------------------------------------
1061 // create a new phi with edges matching r and set (initially) to x
1062 PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
1063   uint preds = r->req();   // Number of predecessor paths
1064   assert(t != Type::MEMORY || at == flatten_phi_adr_type(at), "flatten at");
1065   PhiNode* p = new PhiNode(r, t, at);
1066   for (uint j = 1; j < preds; j++) {
1067     // Fill in all inputs, except those which the region does not yet have
1068     if (r->in(j) != nullptr)
1069       p->init_req(j, x);
1070   }
1071   return p;
1072 }
1073 PhiNode* PhiNode::make(Node* r, Node* x) {
1074   const Type*    t  = x->bottom_type();
1075   const TypePtr* at = nullptr;
1076   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
1077   return make(r, x, t, at);
1078 }
1079 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
1080   const Type*    t  = x->bottom_type();
1081   const TypePtr* at = nullptr;
1082   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
1083   return new PhiNode(r, t, at);
1084 }

1172       np->as_Phi()->verify_adr_type(visited, at);
1173     } else if (n->bottom_type() == Type::TOP
1174                || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
1175       // ignore top inputs
1176     } else {
1177       const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
1178       // recheck phi/non-phi consistency at leaves:
1179       assert((nat != nullptr) == (at != nullptr), "");
1180       assert(nat == at || nat == TypePtr::BOTTOM,
1181              "adr_type must be consistent at leaves of phi nest");
1182     }
1183   }
1184 }
1185 
// Verify a whole nest of phis rooted at this one.
// Consistency check (non-product build): a Phi carries an _adr_type exactly
// when it is a memory Phi, and that adr_type must already be in its
// flattened (alias-class canonical) form.
void PhiNode::verify_adr_type(bool recursive) const {
  if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
  if (Node::in_dump())               return;  // muzzle asserts when printing

  // Only memory phis carry an address type.
  assert((_type == Type::MEMORY) == (_adr_type != nullptr), "adr_type for memory phis only");

  if (!VerifyAliases)       return;  // verify thoroughly only if requested

  // _adr_type must be a fixed point of flatten_phi_adr_type().
  assert(_adr_type == flatten_phi_adr_type(_adr_type),
         "Phi::adr_type must be pre-normalized");

  if (recursive) {
    // Walk the whole nest of phis reachable from this one, re-checking the
    // adr_type consistency at every leaf.
    VectorSet visited;
    verify_adr_type(visited, _adr_type);
  }
}
1203 #endif
1204 
1205 
1206 //------------------------------Value------------------------------------------
1207 // Compute the type of the PhiNode
1208 const Type* PhiNode::Value(PhaseGVN* phase) const {
1209   Node *r = in(0);              // RegionNode
1210   if( !r )                      // Copy or dead
1211     return in(1) ? phase->type(in(1)) : Type::TOP;

1390   assert(is_diamond_phi() > 0, "sanity");
1391   assert(req() == 3, "same as region");
1392   const Node* region = in(0);
1393   for (uint i = 1; i < 3; i++) {
1394     Node* phi_input = in(i);
1395     if (phi_input != nullptr && phi_input->is_MergeMem() && region->in(i)->outcnt() == 1) {
1396       // Nothing is control-dependent on path #i except the region itself.
1397       MergeMemNode* merge_mem = phi_input->as_MergeMem();
1398       uint j = 3 - i;
1399       Node* other_phi_input = in(j);
1400       if (other_phi_input != nullptr && other_phi_input == merge_mem->base_memory()) {
1401         // merge_mem is a successor memory to other_phi_input, and is not pinned inside the diamond, so push it out.
1402         // This will allow the diamond to collapse completely if there are no other phis left.
1403         igvn->replace_node(this, merge_mem);
1404         return true;
1405       }
1406     }
1407   }
1408   return false;
1409 }

1410 //----------------------------check_cmove_id-----------------------------------
1411 // Check for CMove'ing a constant after comparing against the constant.
1412 // Happens all the time now, since if we compare equality vs a constant in
1413 // the parser, we "know" the variable is constant on one path and we force
1414 // it.  Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
1415 // conditional move: "x = (x==0)?0:x;".  Yucko.  This fix is slightly more
1416 // general in that we don't need constants.  Since CMove's are only inserted
1417 // in very special circumstances, we do it here on generic Phi's.
1418 Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) {
1419   assert(true_path !=0, "only diamond shape graph expected");
1420 
1421   // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1422   // phi->region->if_proj->ifnode->bool->cmp
1423   Node*     region = in(0);
1424   Node*     iff    = region->in(1)->in(0);
1425   BoolNode* b      = iff->in(1)->as_Bool();
1426   Node*     cmp    = b->in(1);
1427   Node*     tval   = in(true_path);
1428   Node*     fval   = in(3-true_path);
1429   Node*     id     = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b);

2010 
2011     if (rc->in(0)->in(1) == nullptr || !rc->in(0)->in(1)->is_Bool()) { continue; }
2012     if (worklist.member(rc->in(0)->in(1))) {
2013       delay = true;
2014       break;
2015     }
2016 
2017     if (rc->in(0)->in(1)->in(1) == nullptr || !rc->in(0)->in(1)->in(1)->is_Cmp()) { continue; }
2018     if (worklist.member(rc->in(0)->in(1)->in(1))) {
2019       delay = true;
2020       break;
2021     }
2022   }
2023 
2024   if (delay) {
2025     worklist.push(this);
2026   }
2027   return delay;
2028 }
2029 














































2030 // If the Phi's Region is in an irreducible loop, and the Region
2031 // has had an input removed, but not yet transformed, it could be
2032 // that the Region (and this Phi) are not reachable from Root.
2033 // If we allow the Phi to collapse before the Region, this may lead
2034 // to dead-loop data. Wait for the Region to check for reachability,
2035 // and potentially remove the dead code.
2036 bool PhiNode::must_wait_for_region_in_irreducible_loop(PhaseGVN* phase) const {
2037   RegionNode* region = in(0)->as_Region();
2038   if (region->loop_status() == RegionNode::LoopStatus::MaybeIrreducibleEntry) {
2039     Node* top = phase->C->top();
2040     for (uint j = 1; j < req(); j++) {
2041       Node* rc = region->in(j); // for each control input
2042       if (rc == nullptr || phase->type(rc) == Type::TOP) {
2043         // Region is missing a control input
2044         Node* n = in(j);
2045         if (n != nullptr && n != top) {
2046           // Phi still has its input, so region just lost its input
2047           return true;
2048         }
2049       }

2342           for (uint i = 1; i < req(); i++) {
2343             offset->init_req(i, in(i)->in(AddPNode::Offset));
2344           }
2345           phase->is_IterGVN()->register_new_node_with_optimizer(offset);
2346         }
2347         return new AddPNode(base, address, offset);
2348       }
2349     }
2350   }
2351 
2352   // Split phis through memory merges, so that the memory merges will go away.
2353   // Piggy-back this transformation on the search for a unique input....
2354   // It will be as if the merged memory is the unique value of the phi.
2355   // (Do not attempt this optimization unless parsing is complete.
2356   // It would make the parser's memory-merge logic sick.)
2357   // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
2358   if (progress == nullptr && can_reshape && type() == Type::MEMORY) {
2359     // see if this phi should be sliced
2360     uint merge_width = 0;
2361     bool saw_self = false;


2362     for( uint i=1; i<req(); ++i ) {// For all paths in
2363       Node *ii = in(i);
2364       // TOP inputs should not be counted as safe inputs because if the
2365       // Phi references itself through all other inputs then splitting the
2366       // Phi through memory merges would create dead loop at later stage.
2367       if (ii == top) {
2368         return nullptr; // Delay optimization until graph is cleaned.
2369       }
2370       if (ii->is_MergeMem()) {
2371         MergeMemNode* n = ii->as_MergeMem();
2372         merge_width = MAX2(merge_width, n->req());
2373         saw_self = saw_self || (n->base_memory() == this);


2374       }
2375     }
2376 
2377     // This restriction is temporarily necessary to ensure termination:
2378     if (!saw_self && adr_type() == TypePtr::BOTTOM)  merge_width = 0;
2379 
2380     if (merge_width > Compile::AliasIdxRaw) {
2381       // found at least one non-empty MergeMem
2382       const TypePtr* at = adr_type();
2383       if (at != TypePtr::BOTTOM) {
2384         // Patch the existing phi to select an input from the merge:
2385         // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2386         //     Phi:AT1(...m1...)
2387         int alias_idx = phase->C->get_alias_index(at);
2388         for (uint i=1; i<req(); ++i) {
2389           Node *ii = in(i);
2390           if (ii->is_MergeMem()) {
2391             MergeMemNode* n = ii->as_MergeMem();
2392             // compress paths and change unreachable cycles to TOP
2393             // If not, we can update the input infinitely along a MergeMem cycle
2394             // Equivalent code is in MemNode::Ideal_common
2395             Node *m  = phase->transform(n);
2396             if (outcnt() == 0) {  // Above transform() may kill us!
2397               return top;
2398             }

2429         if (!saw_safe_input) {
2430           // There is a dead loop: All inputs are either dead or reference back to this phi
2431           return top;
2432         }
2433 
2434         // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
2435         //     MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
2436         PhaseIterGVN* igvn = phase->is_IterGVN();
2437         assert(igvn != nullptr, "sanity check");
2438         Node* hook = new Node(1);
2439         PhiNode* new_base = (PhiNode*) clone();
2440         // Must eagerly register phis, since they participate in loops.
2441         igvn->register_new_node_with_optimizer(new_base);
2442         hook->add_req(new_base);
2443 
2444         MergeMemNode* result = MergeMemNode::make(new_base);
2445         for (uint i = 1; i < req(); ++i) {
2446           Node *ii = in(i);
2447           if (ii->is_MergeMem()) {
2448             MergeMemNode* n = ii->as_MergeMem();





2449             for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
2450               // If we have not seen this slice yet, make a phi for it.
2451               bool made_new_phi = false;
2452               if (mms.is_empty()) {
2453                 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
2454                 made_new_phi = true;
2455                 igvn->register_new_node_with_optimizer(new_phi);
2456                 hook->add_req(new_phi);
2457                 mms.set_memory(new_phi);
2458               }
2459               Node* phi = mms.memory();
2460               assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
2461               phi->set_req(i, mms.memory2());
2462             }
2463           }
2464         }
2465         // Distribute all self-loops.
2466         { // (Extra braces to hide mms.)
2467           for (MergeMemStream mms(result); mms.next_non_empty(); ) {
2468             Node* phi = mms.memory();

2547             if (is_decodeN) {
2548               new_ii = new EncodePNode(ii, narrow_t);
2549             } else {
2550               new_ii = new EncodePKlassNode(ii, narrow_t);
2551             }
2552             igvn->register_new_node_with_optimizer(new_ii);
2553           }
2554         }
2555         new_phi->set_req(i, new_ii);
2556       }
2557       igvn->register_new_node_with_optimizer(new_phi, this);
2558       if (is_decodeN) {
2559         progress = new DecodeNNode(new_phi, bottom_type());
2560       } else {
2561         progress = new DecodeNKlassNode(new_phi, bottom_type());
2562       }
2563     }
2564   }
2565 #endif
2566 





2567   // Try to convert a Phi with two duplicated convert nodes into a phi of the pre-conversion type and the convert node
2568   // proceeding the phi, to de-duplicate the convert node and compact the IR.
2569   if (can_reshape && progress == nullptr) {
2570     ConvertNode* convert = in(1)->isa_Convert();
2571     if (convert != nullptr) {
2572       int conv_op = convert->Opcode();
2573       bool ok = true;
2574 
2575       // Check the rest of the inputs
2576       for (uint i = 2; i < req(); i++) {
2577         // Make sure that all inputs are of the same type of convert node
2578         if (in(i)->Opcode() != conv_op) {
2579           ok = false;
2580           break;
2581         }
2582       }
2583 
2584       if (ok) {
2585         // Find the local bottom type to set as the type of the phi
2586         const Type* source_type = Type::get_const_basic_type(convert->in_type()->basic_type());

2590         // Set inputs to the new phi be the inputs of the convert
2591         for (uint i = 1; i < req(); i++) {
2592           newphi->init_req(i, in(i)->in(1));
2593         }
2594 
2595         phase->is_IterGVN()->register_new_node_with_optimizer(newphi, this);
2596 
2597         return ConvertNode::create_convert(get_convert_type(convert, source_type), get_convert_type(convert, dest_type), newphi);
2598       }
2599     }
2600   }
2601 
2602   // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
2603   if (EnableVectorReboxing && can_reshape && progress == nullptr && type()->isa_oopptr()) {
2604     progress = merge_through_phi(this, phase->is_IterGVN());
2605   }
2606 
2607   return progress;              // Return any progress
2608 }
2609 































































































// Arbitrary but total and stable ordering of Type pointers, used only to
// canonicalize lists of types (see PhiNode::collect_types below).
// Compare the pointer values three-way instead of returning their difference:
// on 64-bit platforms the difference of two intptr_t values can overflow or
// be truncated when narrowed to int, producing an inconsistent (non-
// transitive) ordering and therefore unstable sorts.
static int compare_types(const Type* const& e1, const Type* const& e2) {
  const intptr_t p1 = (intptr_t)e1;
  const intptr_t p2 = (intptr_t)e2;
  if (p1 < p2) {
    return -1;
  }
  return (p1 == p2) ? 0 : 1;
}
2613 
2614 // Collect types at casts that are going to be eliminated at that Phi and store them in a TypeTuple.
2615 // Sort the types using an arbitrary order so a list of some types always hashes to the same TypeTuple (and TypeTuple
2616 // pointer comparison is enough to tell if 2 list of types are the same or not)
2617 const TypeTuple* PhiNode::collect_types(PhaseGVN* phase) const {
2618   const Node* region = in(0);
2619   const Type* phi_type = bottom_type();
2620   ResourceMark rm;
2621   GrowableArray<const Type*> types;
2622   for (uint i = 1; i < req(); i++) {
2623     if (region->in(i) == nullptr || phase->type(region->in(i)) == Type::TOP) {
2624       continue;
2625     }
2626     Node* in = Node::in(i);
2627     const Type* t = phase->type(in);
2628     if (in == nullptr || in == this || t == Type::TOP) {
2629       continue;

2972 #ifndef PRODUCT
2973 void CatchProjNode::dump_spec(outputStream *st) const {
2974   ProjNode::dump_spec(st);
2975   st->print("@bci %d ",_handler_bci);
2976 }
2977 #endif
2978 
2979 //=============================================================================
2980 //------------------------------Identity---------------------------------------
2981 // Check for CreateEx being Identity.
// Check for CreateEx being Identity.
// in(0) is the control (normally a CatchProj), in(1) is the exception-producing
// call's projection.
Node* CreateExNode::Identity(PhaseGVN* phase) {
  // Propagate TOP from either input: dead data or dead control.
  if( phase->type(in(1)) == Type::TOP ) return in(1);
  if( phase->type(in(0)) == Type::TOP ) return in(0);
  if (phase->type(in(0)->in(0)) == Type::TOP) {
    assert(in(0)->is_CatchProj(), "control is CatchProj");
    return phase->C->top(); // dead code
  }
  // We only come from CatchProj, unless the CatchProj goes away.
  // If the CatchProj is optimized away, then we just carry the
  // exception oop through.
  CallNode *call = in(1)->in(0)->as_Call();

  // Still the canonical CatchProj->Catch shape hanging off the same call?
  // Then keep this node; otherwise forward the call's first result directly.
  return (in(0)->is_CatchProj() && in(0)->in(0)->is_Catch() &&
          in(0)->in(0)->in(1) == in(1)) ? this : call->in(TypeFunc::Parms);
}
2997 
2998 //=============================================================================
2999 //------------------------------Value------------------------------------------
3000 // Check for being unreachable.
3001 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
3002   if (!in(0) || in(0)->is_top()) return Type::TOP;
3003   return bottom_type();
3004 }
3005 
3006 //------------------------------Ideal------------------------------------------
3007 // Check for no longer being part of a loop
3008 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3009   if (can_reshape && !in(0)->is_Region()) {
3010     // Dead code elimination can sometimes delete this projection so
3011     // if it's not there, there's nothing to do.

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/objArrayKlass.hpp"
  31 #include "opto/addnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/connode.hpp"
  35 #include "opto/convertnode.hpp"
  36 #include "opto/inlinetypenode.hpp"
  37 #include "opto/loopnode.hpp"
  38 #include "opto/machnode.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/narrowptrnode.hpp"
  41 #include "opto/mulnode.hpp"
  42 #include "opto/phaseX.hpp"
  43 #include "opto/regalloc.hpp"
  44 #include "opto/regmask.hpp"
  45 #include "opto/runtime.hpp"
  46 #include "opto/subnode.hpp"
  47 #include "opto/vectornode.hpp"
  48 #include "utilities/vmError.hpp"
  49 
  50 // Portions of code courtesy of Clifford Click
  51 
  52 // Optimization - Graph Style
  53 
  54 //=============================================================================
  55 //------------------------------Value------------------------------------------
  56 // Compute the type of the RegionNode.

 502   if (left_path == nullptr || right_path == nullptr) {
 503     return false;
 504   }
 505   Node* diamond_if = left_path->in(0);
 506   if (diamond_if == nullptr || !diamond_if->is_If() || diamond_if != right_path->in(0)) {
 507     // Not an IfNode merging a diamond or TOP.
 508     return false;
 509   }
 510 
 511   // Check for a proper bool/cmp
 512   const Node* bol = diamond_if->in(1);
 513   if (!bol->is_Bool()) {
 514     return false;
 515   }
 516   const Node* cmp = bol->in(1);
 517   if (!cmp->is_Cmp()) {
 518     return false;
 519   }
 520   return true;
 521 }
 522 
 523 //------------------------------Ideal------------------------------------------
 524 // Return a node which is more "ideal" than the current node.  Must preserve
 525 // the CFG, but we can still strip out dead paths.
 526 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 527   if( !can_reshape && !in(0) ) return nullptr;     // Already degraded to a Copy
 528   assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
 529 
 530   // Check for RegionNode with no Phi users and both inputs come from either
 531   // arm of the same IF.  If found, then the control-flow split is useless.
 532   bool has_phis = false;
 533   if (can_reshape) {            // Need DU info to check for Phi users
 534     try_clean_mem_phis(phase->is_IterGVN());
 535     has_phis = (has_phi() != nullptr);       // Cache result
 536 
 537     if (!has_phis) {            // No Phi users?  Nothing merging?
 538       for (uint i = 1; i < req()-1; i++) {
 539         Node *if1 = in(i);
 540         if( !if1 ) continue;
 541         Node *iff = if1->in(0);
 542         if( !iff || !iff->is_If() ) continue;

 949   if (iff1 == iff2) {
 950     igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
 951     igvn->replace_input_of(region, idx1, iff1->in(0));
 952     igvn->replace_input_of(region, idx2, igvn->C->top());
 953     return (region == this); // Remove useless if (both projections map to the same control/value)
 954   }
 955   BoolNode* bol1 = iff1->in(1)->isa_Bool();
 956   BoolNode* bol2 = iff2->in(1)->isa_Bool();
 957   if (bol1 == nullptr || bol2 == nullptr) {
 958     return false; // No bool inputs found
 959   }
 960   Node* cmp1 = bol1->in(1);
 961   Node* cmp2 = bol2->in(1);
 962   bool commute = false;
 963   if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
 964     return false; // No comparison
 965   } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
 966              cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
 967              cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
 968              cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
 969              cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck() ||
 970              cmp1->is_FlatArrayCheck() || cmp2->is_FlatArrayCheck()) {
 971     // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
 972     // SubTypeCheck is not commutative
 973     return false;
 974   } else if (cmp1 != cmp2) {
 975     if (cmp1->in(1) == cmp2->in(2) &&
 976         cmp1->in(2) == cmp2->in(1)) {
 977       commute = true; // Same but swapped inputs, commute the test
 978     } else {
 979       return false; // Ifs are not comparing the same values
 980     }
 981   }
 982   proj1 = proj1->other_if_proj();
 983   proj2 = proj2->other_if_proj();
 984   if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
 985          proj2->unique_ctrl_out_or_null() == this) ||
 986         (proj2->unique_ctrl_out_or_null() == iff1 &&
 987          proj1->unique_ctrl_out_or_null() == this))) {
 988     return false; // Ifs are not connected through other projs
 989   }
 990   // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged

1029     st->print("#reducible ");
1030     break;
1031   case RegionNode::LoopStatus::NeverIrreducibleEntry:
1032     break; // nothing
1033   }
1034 }
1035 #endif
1036 
// Find the one non-null required input.  RegionNode only
Node *Node::nonnull_req() const {
  assert( is_Region(), "" );
  // Return the first present input among the required region paths
  // (index 0 is the region's own slot and is skipped).
  for( uint i = 1; i < _cnt; i++ )
    if( in(i) )
      return in(i);
  ShouldNotReachHere();  // caller guarantees at least one non-null input
  return nullptr;
}
1046 
1047 
1048 //=============================================================================
1049 // note that these functions assume that the _adr_type field is flat
// Hash includes the (possibly null) address type so that memory phis on
// different alias slices do not value-number together.
uint PhiNode::hash() const {
  const Type* at = _adr_type;
  return TypeNode::hash() + (at ? at->hash() : 0);
}
// Phis compare equal only when their TypeNode parts match and the address
// types are pointer-identical.
bool PhiNode::cmp( const Node &n ) const {
  return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
}
// Canonicalize a phi address type to its alias-class representative.
// nullptr and TypePtr::BOTTOM are already canonical and pass through.
static inline
const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
  if (at == nullptr || at == TypePtr::BOTTOM)  return at;
  return Compile::current()->alias_type(at)->adr_type();
}
1062 
//----------------------------make---------------------------------------------
// create a new phi with edges matching r and set (initially) to x
PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
  uint preds = r->req();   // Number of predecessor paths
  // Memory phis must carry a flattened address type; the one exception is the
  // shared TypeAryPtr::INLINES slice while flat accesses still share a single
  // alias class (flat_accesses_share_alias()).
  assert(t != Type::MEMORY || at == flatten_phi_adr_type(at) || (flatten_phi_adr_type(at) == TypeAryPtr::INLINES && Compile::current()->flat_accesses_share_alias()), "flatten at");
  PhiNode* p = new PhiNode(r, t, at);
  for (uint j = 1; j < preds; j++) {
    // Fill in all inputs, except those which the region does not yet have
    if (r->in(j) != nullptr)
      p->init_req(j, x);
  }
  return p;
}
// Convenience overload: infer the type from x; memory phis also get the
// flattened address type taken from x.
PhiNode* PhiNode::make(Node* r, Node* x) {
  const Type*    t  = x->bottom_type();
  const TypePtr* at = nullptr;
  if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
  return make(r, x, t, at);
}
// Like make(r, x) but leaves all data inputs unset; only the type (and the
// flattened address type for memory phis) is taken from x.
PhiNode* PhiNode::make_blank(Node* r, Node* x) {
  const Type*    t  = x->bottom_type();
  const TypePtr* at = nullptr;
  if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
  return new PhiNode(r, t, at);
}

1175       np->as_Phi()->verify_adr_type(visited, at);
1176     } else if (n->bottom_type() == Type::TOP
1177                || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
1178       // ignore top inputs
1179     } else {
1180       const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
1181       // recheck phi/non-phi consistency at leaves:
1182       assert((nat != nullptr) == (at != nullptr), "");
1183       assert(nat == at || nat == TypePtr::BOTTOM,
1184              "adr_type must be consistent at leaves of phi nest");
1185     }
1186   }
1187 }
1188 
// Verify a whole nest of phis rooted at this one.
// Consistency check (non-product build): a Phi carries an _adr_type exactly
// when it is a memory Phi, and that adr_type must already be flattened.
void PhiNode::verify_adr_type(bool recursive) const {
  if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
  if (Node::in_dump())               return;  // muzzle asserts when printing

  // Only memory phis carry an address type.
  assert((_type == Type::MEMORY) == (_adr_type != nullptr), "adr_type for memory phis only");
  // Flat array element shouldn't get their own memory slice until flat_accesses_share_alias is cleared.
  // It could be the graph has no loads/stores and flat_accesses_share_alias is never cleared. EA could still
  // creates per element Phis but that wouldn't be a problem as there are no memory accesses for that array.
  assert(_adr_type == nullptr || _adr_type->isa_aryptr() == nullptr ||
         _adr_type->is_aryptr()->is_known_instance() ||
         !_adr_type->is_aryptr()->is_flat() ||
         !Compile::current()->flat_accesses_share_alias() ||
         _adr_type == TypeAryPtr::INLINES, "flat array element shouldn't get its own slice yet");

  if (!VerifyAliases)       return;  // verify thoroughly only if requested

  // _adr_type must be a fixed point of flatten_phi_adr_type().
  assert(_adr_type == flatten_phi_adr_type(_adr_type),
         "Phi::adr_type must be pre-normalized");

  if (recursive) {
    // Recursively re-check adr_type consistency at every phi in the nest.
    VectorSet visited;
    verify_adr_type(visited, _adr_type);
  }
}
1214 #endif
1215 
1216 
1217 //------------------------------Value------------------------------------------
1218 // Compute the type of the PhiNode
1219 const Type* PhiNode::Value(PhaseGVN* phase) const {
1220   Node *r = in(0);              // RegionNode
1221   if( !r )                      // Copy or dead
1222     return in(1) ? phase->type(in(1)) : Type::TOP;

1401   assert(is_diamond_phi() > 0, "sanity");
1402   assert(req() == 3, "same as region");
1403   const Node* region = in(0);
1404   for (uint i = 1; i < 3; i++) {
1405     Node* phi_input = in(i);
1406     if (phi_input != nullptr && phi_input->is_MergeMem() && region->in(i)->outcnt() == 1) {
1407       // Nothing is control-dependent on path #i except the region itself.
1408       MergeMemNode* merge_mem = phi_input->as_MergeMem();
1409       uint j = 3 - i;
1410       Node* other_phi_input = in(j);
1411       if (other_phi_input != nullptr && other_phi_input == merge_mem->base_memory()) {
1412         // merge_mem is a successor memory to other_phi_input, and is not pinned inside the diamond, so push it out.
1413         // This will allow the diamond to collapse completely if there are no other phis left.
1414         igvn->replace_node(this, merge_mem);
1415         return true;
1416       }
1417     }
1418   }
1419   return false;
1420 }
1421 
1422 //----------------------------check_cmove_id-----------------------------------
1423 // Check for CMove'ing a constant after comparing against the constant.
1424 // Happens all the time now, since if we compare equality vs a constant in
1425 // the parser, we "know" the variable is constant on one path and we force
1426 // it.  Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
1427 // conditional move: "x = (x==0)?0:x;".  Yucko.  This fix is slightly more
1428 // general in that we don't need constants.  Since CMove's are only inserted
1429 // in very special circumstances, we do it here on generic Phi's.
1430 Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) {
1431   assert(true_path !=0, "only diamond shape graph expected");
1432 
1433   // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1434   // phi->region->if_proj->ifnode->bool->cmp
1435   Node*     region = in(0);
1436   Node*     iff    = region->in(1)->in(0);
1437   BoolNode* b      = iff->in(1)->as_Bool();
1438   Node*     cmp    = b->in(1);
1439   Node*     tval   = in(true_path);
1440   Node*     fval   = in(3-true_path);
1441   Node*     id     = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b);

2022 
2023     if (rc->in(0)->in(1) == nullptr || !rc->in(0)->in(1)->is_Bool()) { continue; }
2024     if (worklist.member(rc->in(0)->in(1))) {
2025       delay = true;
2026       break;
2027     }
2028 
2029     if (rc->in(0)->in(1)->in(1) == nullptr || !rc->in(0)->in(1)->in(1)->is_Cmp()) { continue; }
2030     if (worklist.member(rc->in(0)->in(1)->in(1))) {
2031       delay = true;
2032       break;
2033     }
2034   }
2035 
2036   if (delay) {
2037     worklist.push(this);
2038   }
2039   return delay;
2040 }
2041 
// Push inline type input nodes (and null) down through the phi recursively (can handle data loops).
// Builds a fresh InlineTypeNode whose field values are per-field phis merging
// the corresponding field values of this phi's inputs; the caller replaces
// this (pointer-typed) phi with the returned node.
InlineTypeNode* PhiNode::push_inline_types_down(PhaseGVN* phase, bool can_reshape, ciInlineKlass* inline_klass) {
  assert(inline_klass != nullptr, "must be");
  // Start from a null inline type and clone per-field phis on this phi's region.
  InlineTypeNode* vt = InlineTypeNode::make_null(*phase, inline_klass, /* transform = */ false)->clone_with_phis(phase, in(0), nullptr, !_type->maybe_null());
  if (can_reshape) {
    // Replace phi right away to be able to use the inline
    // type node when reaching the phi again through data loops.
    PhaseIterGVN* igvn = phase->is_IterGVN();
    for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
      Node* u = fast_out(i);
      igvn->rehash_node_delayed(u);
      // replace_edge may remove several out-edges at once; shrink the
      // iteration bound accordingly and re-check the current slot.
      imax -= u->replace_edge(this, vt);
      --i;
    }
    igvn->rehash_node_delayed(this);
    assert(outcnt() == 0, "should be dead now");
  }
  ResourceMark rm;
  Node_List casts;
  // Merge each phi input into 'vt', pushing through any ConstraintCasts.
  for (uint i = 1; i < req(); ++i) {
    Node* n = in(i);
    // Strip casts off the input, remembering them so they can be re-applied
    // to the oop of the resulting InlineTypeNode below.
    while (n->is_ConstraintCast()) {
      casts.push(n);
      n = n->in(1);
    }
    if (phase->type(n)->is_zero_type()) {
      // Constant null input: use a null inline type of the same klass.
      n = InlineTypeNode::make_null(*phase, inline_klass);
    } else if (n->is_Phi()) {
      // Nested phi input: recurse (data loops are handled via the eager
      // replacement above, which is only done during IGVN).
      assert(can_reshape, "can only handle phis during IGVN");
      n = phase->transform(n->as_Phi()->push_inline_types_down(phase, can_reshape, inline_klass));
    }
    while (casts.size() != 0) {
      // Push the cast(s) through the InlineTypeNode
      // TODO 8302217 Can we avoid cloning? See InlineTypeNode::clone_if_required
      Node* cast = casts.pop()->clone();
      cast->set_req_X(1, n->as_InlineType()->get_oop(), phase);
      n = n->clone();
      n->as_InlineType()->set_oop(*phase, phase->transform(cast));
      n = phase->transform(n);
    }
    bool transform = !can_reshape && (i == (req()-1)); // Transform phis on last merge
    vt->merge_with(phase, n->as_InlineType(), i, transform);
  }
  return vt;
}
2087 
2088 // If the Phi's Region is in an irreducible loop, and the Region
2089 // has had an input removed, but not yet transformed, it could be
2090 // that the Region (and this Phi) are not reachable from Root.
2091 // If we allow the Phi to collapse before the Region, this may lead
2092 // to dead-loop data. Wait for the Region to check for reachability,
2093 // and potentially remove the dead code.
2094 bool PhiNode::must_wait_for_region_in_irreducible_loop(PhaseGVN* phase) const {
2095   RegionNode* region = in(0)->as_Region();
2096   if (region->loop_status() == RegionNode::LoopStatus::MaybeIrreducibleEntry) {
2097     Node* top = phase->C->top();
2098     for (uint j = 1; j < req(); j++) {
2099       Node* rc = region->in(j); // for each control input
2100       if (rc == nullptr || phase->type(rc) == Type::TOP) {
2101         // Region is missing a control input
2102         Node* n = in(j);
2103         if (n != nullptr && n != top) {
2104           // Phi still has its input, so region just lost its input
2105           return true;
2106         }
2107       }

2400           for (uint i = 1; i < req(); i++) {
2401             offset->init_req(i, in(i)->in(AddPNode::Offset));
2402           }
2403           phase->is_IterGVN()->register_new_node_with_optimizer(offset);
2404         }
2405         return new AddPNode(base, address, offset);
2406       }
2407     }
2408   }
2409 
2410   // Split phis through memory merges, so that the memory merges will go away.
2411   // Piggy-back this transformation on the search for a unique input....
2412   // It will be as if the merged memory is the unique value of the phi.
2413   // (Do not attempt this optimization unless parsing is complete.
2414   // It would make the parser's memory-merge logic sick.)
2415   // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
2416   if (progress == nullptr && can_reshape && type() == Type::MEMORY) {
2417     // see if this phi should be sliced
2418     uint merge_width = 0;
2419     bool saw_self = false;
2420     // TODO revisit this with JDK-8247216
2421     bool mergemem_only = true;
2422     for( uint i=1; i<req(); ++i ) {// For all paths in
2423       Node *ii = in(i);
2424       // TOP inputs should not be counted as safe inputs because if the
2425       // Phi references itself through all other inputs then splitting the
2426       // Phi through memory merges would create dead loop at later stage.
2427       if (ii == top) {
2428         return nullptr; // Delay optimization until graph is cleaned.
2429       }
2430       if (ii->is_MergeMem()) {
2431         MergeMemNode* n = ii->as_MergeMem();
2432         merge_width = MAX2(merge_width, n->req());
2433         saw_self = saw_self || (n->base_memory() == this);
2434       } else {
2435         mergemem_only = false;
2436       }
2437     }
2438 
2439     // This restriction is temporarily necessary to ensure termination:
2440     if (!mergemem_only && !saw_self && adr_type() == TypePtr::BOTTOM)  merge_width = 0;
2441 
2442     if (merge_width > Compile::AliasIdxRaw) {
2443       // found at least one non-empty MergeMem
2444       const TypePtr* at = adr_type();
2445       if (at != TypePtr::BOTTOM) {
2446         // Patch the existing phi to select an input from the merge:
2447         // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2448         //     Phi:AT1(...m1...)
2449         int alias_idx = phase->C->get_alias_index(at);
2450         for (uint i=1; i<req(); ++i) {
2451           Node *ii = in(i);
2452           if (ii->is_MergeMem()) {
2453             MergeMemNode* n = ii->as_MergeMem();
2454             // compress paths and change unreachable cycles to TOP
2455             // If not, we can update the input infinitely along a MergeMem cycle
2456             // Equivalent code is in MemNode::Ideal_common
2457             Node *m  = phase->transform(n);
2458             if (outcnt() == 0) {  // Above transform() may kill us!
2459               return top;
2460             }

2491         if (!saw_safe_input) {
2492           // There is a dead loop: All inputs are either dead or reference back to this phi
2493           return top;
2494         }
2495 
2496         // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
2497         //     MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
2498         PhaseIterGVN* igvn = phase->is_IterGVN();
2499         assert(igvn != nullptr, "sanity check");
2500         Node* hook = new Node(1);
2501         PhiNode* new_base = (PhiNode*) clone();
2502         // Must eagerly register phis, since they participate in loops.
2503         igvn->register_new_node_with_optimizer(new_base);
2504         hook->add_req(new_base);
2505 
2506         MergeMemNode* result = MergeMemNode::make(new_base);
2507         for (uint i = 1; i < req(); ++i) {
2508           Node *ii = in(i);
2509           if (ii->is_MergeMem()) {
2510             MergeMemNode* n = ii->as_MergeMem();
2511             if (igvn) {
2512               // TODO revisit this with JDK-8247216
2513               // Put 'n' on the worklist because it might be modified by MergeMemStream::iteration_setup
2514               igvn->_worklist.push(n);
2515             }
2516             for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
2517               // If we have not seen this slice yet, make a phi for it.
2518               bool made_new_phi = false;
2519               if (mms.is_empty()) {
2520                 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
2521                 made_new_phi = true;
2522                 igvn->register_new_node_with_optimizer(new_phi);
2523                 hook->add_req(new_phi);
2524                 mms.set_memory(new_phi);
2525               }
2526               Node* phi = mms.memory();
2527               assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
2528               phi->set_req(i, mms.memory2());
2529             }
2530           }
2531         }
2532         // Distribute all self-loops.
2533         { // (Extra braces to hide mms.)
2534           for (MergeMemStream mms(result); mms.next_non_empty(); ) {
2535             Node* phi = mms.memory();

2614             if (is_decodeN) {
2615               new_ii = new EncodePNode(ii, narrow_t);
2616             } else {
2617               new_ii = new EncodePKlassNode(ii, narrow_t);
2618             }
2619             igvn->register_new_node_with_optimizer(new_ii);
2620           }
2621         }
2622         new_phi->set_req(i, new_ii);
2623       }
2624       igvn->register_new_node_with_optimizer(new_phi, this);
2625       if (is_decodeN) {
2626         progress = new DecodeNNode(new_phi, bottom_type());
2627       } else {
2628         progress = new DecodeNKlassNode(new_phi, bottom_type());
2629       }
2630     }
2631   }
2632 #endif
2633 
2634   Node* inline_type = try_push_inline_types_down(phase, can_reshape);
2635   if (inline_type != this) {
2636     return inline_type;
2637   }
2638 
2639   // Try to convert a Phi with two duplicated convert nodes into a phi of the pre-conversion type and the convert node
2640   // proceeding the phi, to de-duplicate the convert node and compact the IR.
2641   if (can_reshape && progress == nullptr) {
2642     ConvertNode* convert = in(1)->isa_Convert();
2643     if (convert != nullptr) {
2644       int conv_op = convert->Opcode();
2645       bool ok = true;
2646 
2647       // Check the rest of the inputs
2648       for (uint i = 2; i < req(); i++) {
2649         // Make sure that all inputs are of the same type of convert node
2650         if (in(i)->Opcode() != conv_op) {
2651           ok = false;
2652           break;
2653         }
2654       }
2655 
2656       if (ok) {
2657         // Find the local bottom type to set as the type of the phi
2658         const Type* source_type = Type::get_const_basic_type(convert->in_type()->basic_type());

2662         // Set inputs to the new phi be the inputs of the convert
2663         for (uint i = 1; i < req(); i++) {
2664           newphi->init_req(i, in(i)->in(1));
2665         }
2666 
2667         phase->is_IterGVN()->register_new_node_with_optimizer(newphi, this);
2668 
2669         return ConvertNode::create_convert(get_convert_type(convert, source_type), get_convert_type(convert, dest_type), newphi);
2670       }
2671     }
2672   }
2673 
2674   // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
2675   if (EnableVectorReboxing && can_reshape && progress == nullptr && type()->isa_oopptr()) {
2676     progress = merge_through_phi(this, phase->is_IterGVN());
2677   }
2678 
2679   return progress;              // Return any progress
2680 }
2681 
2682 // Check recursively if inputs are either an inline type, constant null
2683 // or another Phi (including self references through data loops). If so,
2684 // push the inline types down through the phis to enable folding of loads.
2685 Node* PhiNode::try_push_inline_types_down(PhaseGVN* phase, const bool can_reshape) {
2686   if (!can_be_inline_type()) {
2687     return this;
2688   }
2689 
2690   ciInlineKlass* inline_klass;
2691   if (can_push_inline_types_down(phase, can_reshape, inline_klass)) {
2692     assert(inline_klass != nullptr, "must be");
2693     return push_inline_types_down(phase, can_reshape, inline_klass);
2694   }
2695   return this;
2696 }
2697 
2698 bool PhiNode::can_push_inline_types_down(PhaseGVN* phase, const bool can_reshape, ciInlineKlass*& inline_klass) {
2699   if (req() <= 2) {
2700     // Dead phi.
2701     return false;
2702   }
2703   inline_klass = nullptr;
2704 
2705   // TODO 8302217 We need to prevent endless pushing through
2706   bool only_phi = (outcnt() != 0);
2707   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
2708     Node* n = fast_out(i);
2709     if (n->is_InlineType() && n->in(1) == this) {
2710       return false;
2711     }
2712     if (!n->is_Phi()) {
2713       only_phi = false;
2714     }
2715   }
2716   if (only_phi) {
2717     return false;
2718   }
2719 
2720   ResourceMark rm;
2721   Unique_Node_List worklist;
2722   worklist.push(this);
2723   Node_List casts;
2724 
2725   for (uint next = 0; next < worklist.size(); next++) {
2726     Node* phi = worklist.at(next);
2727     for (uint i = 1; i < phi->req(); i++) {
2728       Node* n = phi->in(i);
2729       if (n == nullptr) {
2730         return false;
2731       }
2732       while (n->is_ConstraintCast()) {
2733         if (n->in(0) != nullptr && n->in(0)->is_top()) {
2734           // Will die, don't optimize
2735           return false;
2736         }
2737         casts.push(n);
2738         n = n->in(1);
2739       }
2740       const Type* type = phase->type(n);
2741       if (n->is_InlineType() && (inline_klass == nullptr || inline_klass == type->inline_klass())) {
2742         inline_klass = type->inline_klass();
2743       } else if (n->is_Phi() && can_reshape && n->bottom_type()->isa_ptr()) {
2744         worklist.push(n);
2745       } else if (!type->is_zero_type()) {
2746         return false;
2747       }
2748     }
2749   }
2750   if (inline_klass == nullptr) {
2751     return false;
2752   }
2753 
2754   // Check if cast nodes can be pushed through
2755   const Type* t = Type::get_const_type(inline_klass);
2756   while (casts.size() != 0 && t != nullptr) {
2757     Node* cast = casts.pop();
2758     if (t->filter(cast->bottom_type()) == Type::TOP) {
2759       return false;
2760     }
2761   }
2762 
2763   return true;
2764 }
2765 
2766 #ifdef ASSERT
2767 bool PhiNode::can_push_inline_types_down(PhaseGVN* phase) {
2768   if (!can_be_inline_type()) {
2769     return false;
2770   }
2771 
2772   ciInlineKlass* inline_klass;
2773   return can_push_inline_types_down(phase, true, inline_klass);
2774 }
2775 #endif // ASSERT
2776 
2777 static int compare_types(const Type* const& e1, const Type* const& e2) {
2778   return (intptr_t)e1 - (intptr_t)e2;
2779 }
2780 
2781 // Collect types at casts that are going to be eliminated at that Phi and store them in a TypeTuple.
2782 // Sort the types using an arbitrary order so a list of some types always hashes to the same TypeTuple (and TypeTuple
2783 // pointer comparison is enough to tell if 2 list of types are the same or not)
2784 const TypeTuple* PhiNode::collect_types(PhaseGVN* phase) const {
2785   const Node* region = in(0);
2786   const Type* phi_type = bottom_type();
2787   ResourceMark rm;
2788   GrowableArray<const Type*> types;
2789   for (uint i = 1; i < req(); i++) {
2790     if (region->in(i) == nullptr || phase->type(region->in(i)) == Type::TOP) {
2791       continue;
2792     }
2793     Node* in = Node::in(i);
2794     const Type* t = phase->type(in);
2795     if (in == nullptr || in == this || t == Type::TOP) {
2796       continue;

3139 #ifndef PRODUCT
3140 void CatchProjNode::dump_spec(outputStream *st) const {
3141   ProjNode::dump_spec(st);
3142   st->print("@bci %d ",_handler_bci);
3143 }
3144 #endif
3145 
3146 //=============================================================================
3147 //------------------------------Identity---------------------------------------
3148 // Check for CreateEx being Identity.
3149 Node* CreateExNode::Identity(PhaseGVN* phase) {
3150   if( phase->type(in(1)) == Type::TOP ) return in(1);
3151   if( phase->type(in(0)) == Type::TOP ) return in(0);
3152   if (phase->type(in(0)->in(0)) == Type::TOP) {
3153     assert(in(0)->is_CatchProj(), "control is CatchProj");
3154     return phase->C->top(); // dead code
3155   }
3156   // We only come from CatchProj, unless the CatchProj goes away.
3157   // If the CatchProj is optimized away, then we just carry the
3158   // exception oop through.
3159 
3160   // CheckCastPPNode::Ideal() for inline types reuses the exception
3161   // paths of a call to perform an allocation: we can see a Phi here.
3162   if (in(1)->is_Phi()) {
3163     return this;
3164   }
3165   CallNode *call = in(1)->in(0)->as_Call();
3166 
3167   return (in(0)->is_CatchProj() && in(0)->in(0)->is_Catch() &&
3168           in(0)->in(0)->in(1) == in(1)) ? this : call->in(TypeFunc::Parms);
3169 }
3170 
3171 //=============================================================================
3172 //------------------------------Value------------------------------------------
3173 // Check for being unreachable.
3174 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
3175   if (!in(0) || in(0)->is_top()) return Type::TOP;
3176   return bottom_type();
3177 }
3178 
3179 //------------------------------Ideal------------------------------------------
3180 // Check for no longer being part of a loop
3181 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3182   if (can_reshape && !in(0)->is_Region()) {
3183     // Dead code elimination can sometimes delete this projection so
3184     // if it's not there, there's nothing to do.
< prev index next >