src/hotspot/share/opto/cfgnode.cpp

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "gc/shared/barrierSet.hpp"
  26 #include "gc/shared/c2/barrierSetC2.hpp"
  27 #include "memory/allocation.inline.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "oops/objArrayKlass.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/castnode.hpp"
  32 #include "opto/cfgnode.hpp"
  33 #include "opto/connode.hpp"
  34 #include "opto/convertnode.hpp"

  35 #include "opto/loopnode.hpp"
  36 #include "opto/machnode.hpp"
  37 #include "opto/movenode.hpp"
  38 #include "opto/mulnode.hpp"
  39 #include "opto/narrowptrnode.hpp"
  40 #include "opto/phaseX.hpp"
  41 #include "opto/regalloc.hpp"
  42 #include "opto/regmask.hpp"
  43 #include "opto/runtime.hpp"
  44 #include "opto/subnode.hpp"
  45 #include "opto/vectornode.hpp"
  46 #include "utilities/vmError.hpp"
  47 
  48 // Portions of code courtesy of Clifford Click
  49 
  50 // Optimization - Graph Style
  51 
  52 //=============================================================================
  53 //------------------------------Value------------------------------------------
  54 // Compute the type of the RegionNode.

 503   if (left_path == nullptr || right_path == nullptr) {
 504     return false;
 505   }
 506   Node* diamond_if = left_path->in(0);
 507   if (diamond_if == nullptr || !diamond_if->is_If() || diamond_if != right_path->in(0)) {
 508     // Not an IfNode merging a diamond or TOP.
 509     return false;
 510   }
 511 
 512   // Check for a proper bool/cmp
 513   const Node* bol = diamond_if->in(1);
 514   if (!bol->is_Bool()) {
 515     return false;
 516   }
 517   const Node* cmp = bol->in(1);
 518   if (!cmp->is_Cmp()) {
 519     return false;
 520   }
 521   return true;
 522 }
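
For orientation, the shape this helper accepts is the classic diamond: one If whose two
projections are merged straight into the Region, with a Bool fed by a Cmp driving the If.
Below is a minimal standalone sketch of the same walk, assuming a toy node struct rather
than HotSpot's Node API (all names here are illustrative only):

    #include <string>
    #include <vector>

    struct ToyNode {
      std::string op;             // e.g. "If", "Bool", "Cmp", "IfTrue", "IfFalse"
      std::vector<ToyNode*> in;   // in[0] is the control input, as in C2
    };

    static const ToyNode* input(const ToyNode* n, size_t i) {
      return (n != nullptr && i < n->in.size()) ? n->in[i] : nullptr;
    }

    // Both merged paths must be projections of the same If, and the If must
    // be driven by a Bool that is in turn driven by a Cmp.
    static bool is_diamond(const ToyNode* left_path, const ToyNode* right_path) {
      const ToyNode* diamond_if = input(left_path, 0);
      if (diamond_if == nullptr || diamond_if->op != "If" ||
          diamond_if != input(right_path, 0)) {
        return false;             // not an If merging a diamond
      }
      const ToyNode* bol = input(diamond_if, 1);
      if (bol == nullptr || bol->op != "Bool") return false;
      const ToyNode* cmp = input(bol, 1);
      return cmp != nullptr && cmp->op == "Cmp";
    }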

 523 //------------------------------Ideal------------------------------------------
 524 // Return a node which is more "ideal" than the current node.  Must preserve
 525 // the CFG, but we can still strip out dead paths.
 526 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 527   if( !can_reshape && !in(0) ) return nullptr;     // Already degraded to a Copy
 528   assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
 529 
 530   // Check for a RegionNode with no Phi users whose inputs come from the two
 531   // arms of the same If.  If found, then the control-flow split is useless.
 532   bool has_phis = false;
 533   if (can_reshape) {            // Need DU info to check for Phi users
 534     try_clean_mem_phis(phase->is_IterGVN());
 535     has_phis = (has_phi() != nullptr);       // Cache result
 536 
 537     if (!has_phis) {            // No Phi users?  Nothing merging?
 538       for (uint i = 1; i < req()-1; i++) {
 539         Node *if1 = in(i);
 540         if( !if1 ) continue;
 541         Node *iff = if1->in(0);
 542         if( !iff || !iff->is_If() ) continue;

 949   if (iff1 == iff2) {
 950     igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
 951     igvn->replace_input_of(region, idx1, iff1->in(0));
 952     igvn->replace_input_of(region, idx2, igvn->C->top());
 953     return (region == this); // Remove useless if (both projections map to the same control/value)
 954   }
 955   BoolNode* bol1 = iff1->in(1)->isa_Bool();
 956   BoolNode* bol2 = iff2->in(1)->isa_Bool();
 957   if (bol1 == nullptr || bol2 == nullptr) {
 958     return false; // No bool inputs found
 959   }
 960   Node* cmp1 = bol1->in(1);
 961   Node* cmp2 = bol2->in(1);
 962   bool commute = false;
 963   if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
 964     return false; // No comparison
 965   } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
 966              cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
 967              cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
 968              cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
 969              cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck()) {

 970     // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
 971     // SubTypeCheck is not commutative
 972     return false;
 973   } else if (cmp1 != cmp2) {
 974     if (cmp1->in(1) == cmp2->in(2) &&
 975         cmp1->in(2) == cmp2->in(1)) {
 976       commute = true; // Same but swapped inputs, commute the test
 977     } else {
 978       return false; // Ifs are not comparing the same values
 979     }
 980   }
 981   proj1 = proj1->other_if_proj();
 982   proj2 = proj2->other_if_proj();
 983   if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
 984          proj2->unique_ctrl_out_or_null() == this) ||
 985         (proj2->unique_ctrl_out_or_null() == iff1 &&
 986          proj1->unique_ctrl_out_or_null() == this))) {
 987     return false; // Ifs are not connected through other projs
 988   }
 989   // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged

1028     st->print("#reducible ");
1029     break;
1030   case RegionNode::LoopStatus::NeverIrreducibleEntry:
1031     break; // nothing
1032   }
1033 }
1034 #endif
1035 
1036 // Find the one non-null required input.  RegionNode only
1037 Node *Node::nonnull_req() const {
1038   assert( is_Region(), "" );
1039   for( uint i = 1; i < _cnt; i++ )
1040     if( in(i) )
1041       return in(i);
1042   ShouldNotReachHere();
1043   return nullptr;
1044 }
1045 
1046 
1047 //=============================================================================
1048 // note that these functions assume that the _adr_type field is flattened
1049 uint PhiNode::hash() const {
1050   const Type* at = _adr_type;
1051   return TypeNode::hash() + (at ? at->hash() : 0);
1052 }
1053 bool PhiNode::cmp( const Node &n ) const {
1054   return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
1055 }
1056 static inline
1057 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
1058   if (at == nullptr || at == TypePtr::BOTTOM)  return at;
1059   return Compile::current()->alias_type(at)->adr_type();
1060 }
1061 
1062 //----------------------------make---------------------------------------------
1063 // create a new phi with edges matching r and set (initially) to x
1064 PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
1065   uint preds = r->req();   // Number of predecessor paths
1066   assert(t != Type::MEMORY || at == flatten_phi_adr_type(at), "flatten at");
1067   PhiNode* p = new PhiNode(r, t, at);
1068   for (uint j = 1; j < preds; j++) {
1069     // Fill in all inputs, except those which the region does not yet have
1070     if (r->in(j) != nullptr)
1071       p->init_req(j, x);
1072   }
1073   return p;
1074 }
1075 PhiNode* PhiNode::make(Node* r, Node* x) {
1076   const Type*    t  = x->bottom_type();
1077   const TypePtr* at = nullptr;
1078   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
1079   return make(r, x, t, at);
1080 }
1081 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
1082   const Type*    t  = x->bottom_type();
1083   const TypePtr* at = nullptr;
1084   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
1085   return new PhiNode(r, t, at);
1086 }
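
A typical use of make() (a hedged sketch; region, init_value, value_on_path_2 and gvn are
hypothetical locals, not code from this file): the new Phi is sized to match the Region and
every already-present path is seeded with the same initial value, so callers usually refine
individual paths afterwards and let GVN fold the result:

    // Build a Phi over `region` whose inputs all start as `init_value`
    // (the type and adr_type are derived from init_value), then refine
    // one path and let GVN transform the result.
    PhiNode* phi = PhiNode::make(region, init_value);
    phi->set_req(2, value_on_path_2);   // override the second path
    Node* folded = gvn.transform(phi);  // may fold to a single input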

1175       np->as_Phi()->verify_adr_type(visited, at);
1176     } else if (n->bottom_type() == Type::TOP
1177                || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
1178       // ignore top inputs
1179     } else {
1180       const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
1181       // recheck phi/non-phi consistency at leaves:
1182       assert((nat != nullptr) == (at != nullptr), "");
1183       assert(nat == at || nat == TypePtr::BOTTOM,
1184              "adr_type must be consistent at leaves of phi nest");
1185     }
1186   }
1187 }
1188 
1189 // Verify a whole nest of phis rooted at this one.
1190 void PhiNode::verify_adr_type(bool recursive) const {
1191   if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
1192   if (Node::in_dump())               return;  // muzzle asserts when printing
1193 
1194   assert((_type == Type::MEMORY) == (_adr_type != nullptr), "adr_type for memory phis only");

1195 
1196   if (!VerifyAliases)       return;  // verify thoroughly only if requested
1197 
1198   assert(_adr_type == flatten_phi_adr_type(_adr_type),
1199          "Phi::adr_type must be pre-normalized");
1200 
1201   if (recursive) {
1202     VectorSet visited;
1203     verify_adr_type(visited, _adr_type);
1204   }
1205 }
1206 #endif
1207 
1208 
1209 //------------------------------Value------------------------------------------
1210 // Compute the type of the PhiNode
1211 const Type* PhiNode::Value(PhaseGVN* phase) const {
1212   Node *r = in(0);              // RegionNode
1213   if( !r )                      // Copy or dead
1214     return in(1) ? phase->type(in(1)) : Type::TOP;

1458   assert(req() == 3, "same as region");
1459   RegionNode* region = in(0)->as_Region();
1460   for (uint i = 1; i < 3; i++) {
1461     Node* phi_input = in(i);
1462     if (phi_input != nullptr && phi_input->is_MergeMem() && region->in(i)->outcnt() == 1) {
1463       // Nothing is control-dependent on path #i except the region itself.
1464       MergeMemNode* merge_mem = phi_input->as_MergeMem();
1465       uint j = 3 - i;
1466       Node* other_phi_input = in(j);
1467       if (other_phi_input != nullptr && other_phi_input == merge_mem->base_memory() && !is_data_loop(region, phi_input, igvn)) {
1468         // merge_mem is a successor memory to other_phi_input, and is not pinned inside the diamond, so push it out.
1469         // Only proceed if the transformation doesn't create a data loop
1470         // This will allow the diamond to collapse completely if there are no other phis left.
1471         igvn->replace_node(this, merge_mem);
1472         return true;
1473       }
1474     }
1475   }
1476   return false;
1477 }

1478 //----------------------------check_cmove_id-----------------------------------
1479 // Check for CMove'ing a constant after comparing against the constant.
1480 // Happens all the time now, since if we compare equality vs a constant in
1481 // the parser, we "know" the variable is constant on one path and we force
1482 // it.  Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
1483 // conditional move: "x = (x==0)?0:x;".  Yucko.  This fix is slightly more
1484 // general in that we don't need constants.  Since CMove's are only inserted
1485 // in very special circumstances, we do it here on generic Phi's.
1486 Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) {
1487   assert(true_path !=0, "only diamond shape graph expected");
1488 
1489   // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1490   // phi->region->if_proj->ifnode->bool->cmp
1491   Node*     region = in(0);
1492   Node*     iff    = region->in(1)->in(0);
1493   BoolNode* b      = iff->in(1)->as_Bool();
1494   Node*     cmp    = b->in(1);
1495   Node*     tval   = in(true_path);
1496   Node*     fval   = in(3-true_path);
1497   Node*     id     = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b);

1512   }
1513 
1514   return id;
1515 }
1516 
1517 //------------------------------Identity---------------------------------------
1518 // Check for Region being Identity.
1519 Node* PhiNode::Identity(PhaseGVN* phase) {
1520   if (must_wait_for_region_in_irreducible_loop(phase)) {
1521     return this;
1522   }
1523   // Check for no merging going on
1524   // (There used to be special-case code here when this->region->is_Loop.
1525   // It would check for a tributary phi on the backedge that fed the main phi
1526   // trivially, perhaps through a single cast.  The unique_input method
1527   // does all this and more, by reducing such tributaries to 'this'.)
1528   Node* uin = unique_input(phase, false);
1529   if (uin != nullptr) {
1530     return uin;
1531   }
1532 
1533   int true_path = is_diamond_phi();
1534   // Delay CMove'ing identity if Ideal has not had the chance to handle unsafe cases, yet.
1535   if (true_path != 0 && !(phase->is_IterGVN() && wait_for_region_igvn(phase))) {
1536     Node* id = is_cmove_id(phase, true_path);
1537     if (id != nullptr) {
1538       return id;
1539     }
1540   }
1541 
1542   // Looking for phis with identical inputs.  If we find one that has
1543   // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
1544   if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
1545       TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
1546     uint phi_len = req();
1547     Node* phi_reg = region();
1548     for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
1549       Node* u = phi_reg->fast_out(i);
1550       assert(!u->is_Phi() || u->in(0) == phi_reg, "broken Phi/Region subgraph");
1551       if (u->is_Phi() && u->req() == phi_len && can_be_replaced_by(u->as_Phi())) {

1602     }
1603     // Check for a unique input (maybe uncasted)
1604     if (input == nullptr) {
1605       input = un;
1606     } else if (input != un) {
1607       input = NodeSentinel; // no unique input
1608     }
1609   }
1610   if (input == nullptr) {
1611     return phase->C->top();        // no inputs
1612   }
1613 
1614   if (input != NodeSentinel) {
1615     return input;           // one unique direct input
1616   }
1617 
1618   // Nothing.
1619   return nullptr;
1620 }
1621 
1622 //------------------------------is_x2logic-------------------------------------
1623 // Check for simple convert-to-boolean pattern
1624 // If:(C Bool) Region:(IfF IfT) Phi:(Region 0 1)
1625 // Convert the Phi to a Conv2B.
1626 static Node *is_x2logic( PhaseGVN *phase, PhiNode *phi, int true_path ) {
1627   assert(true_path !=0, "only diamond shape graph expected");
1628 
1629   // If we're late in the optimization process, we may have already expanded Conv2B nodes
1630   if (phase->C->post_loop_opts_phase() && !Matcher::match_rule_supported(Op_Conv2B)) {
1631     return nullptr;
1632   }
1633 
1634   // Convert the true/false index into an expected 0/1 return.
1635   // Map 2->0 and 1->1.
1636   int flipped = 2-true_path;
1637 
1638   // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1639   // phi->region->if_proj->ifnode->bool->cmp
1640   Node *region = phi->in(0);
1641   Node *iff = region->in(1)->in(0);

2069 
2070     if (rc->in(0)->in(1) == nullptr || !rc->in(0)->in(1)->is_Bool()) { continue; }
2071     if (worklist.member(rc->in(0)->in(1))) {
2072       delay = true;
2073       break;
2074     }
2075 
2076     if (rc->in(0)->in(1)->in(1) == nullptr || !rc->in(0)->in(1)->in(1)->is_Cmp()) { continue; }
2077     if (worklist.member(rc->in(0)->in(1)->in(1))) {
2078       delay = true;
2079       break;
2080     }
2081   }
2082 
2083   if (delay) {
2084     worklist.push(this);
2085   }
2086   return delay;
2087 }
2088 
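
The tail above is a common IGVN idiom: if a node this transformation inspects (here the
If's Bool or Cmp) is still pending on the worklist, re-enqueue ourselves and retry once it
has settled. A generic standalone sketch of the idiom, assuming toy types rather than the
real PhaseIterGVN worklist:

    #include <deque>
    #include <set>
    #include <vector>

    struct Worklist {
      std::deque<int> queue;   // node ids pending re-examination
      std::set<int> members;   // for cheap membership tests
      bool member(int id) const { return members.count(id) != 0; }
      void push(int id) { if (members.insert(id).second) queue.push_back(id); }
    };

    // Delay a transformation of `self` while any dependency is still queued:
    // push self back so it is revisited after the dependencies settle.
    static bool maybe_delay(Worklist& wl, int self, const std::vector<int>& deps) {
      for (int d : deps) {
        if (wl.member(d)) {
          wl.push(self);       // retry later
          return true;
        }
      }
      return false;            // safe to transform now
    }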
2089 // If the Phi's Region is in an irreducible loop, and the Region
2090 // has had an input removed, but not yet transformed, it could be
2091 // that the Region (and this Phi) are not reachable from Root.
2092 // If we allow the Phi to collapse before the Region, this may create
2093 // a dead data loop. Wait for the Region to check for reachability,
2094 // and potentially remove the dead code.
2095 bool PhiNode::must_wait_for_region_in_irreducible_loop(PhaseGVN* phase) const {
2096   RegionNode* region = in(0)->as_Region();
2097   if (region->loop_status() == RegionNode::LoopStatus::MaybeIrreducibleEntry) {
2098     Node* top = phase->C->top();
2099     for (uint j = 1; j < req(); j++) {
2100       Node* rc = region->in(j); // for each control input
2101       if (rc == nullptr || phase->type(rc) == Type::TOP) {
2102         // Region is missing a control input
2103         Node* n = in(j);
2104         if (n != nullptr && n != top) {
2105           // Phi still has its input, so region just lost its input
2106           return true;
2107         }
2108       }

2505     //                Phi (this)   |
2506     //                 |           |
2507     //                 +-----------+
2508     //
2509     // Generally, such circularity raises non-termination issues
2510     // (see comment further below). However, if there is a direct loop to self,
2511     // splitting the Phi through the MergeMem will result in the below.
2512     //
2513     //               +---+
2514     //               |   |
2515     //               v   |
2516     //              Phi  |
2517     //               |\  |
2518     //               | +-+
2519     // (base_memory) v
2520     //              MergeMem
2521     //
2522     // This split breaks the circularity and consequently does not lead to
2523     // non-termination.
2524     uint merge_width = 0;
2525     bool split_always_terminates = false; // Is splitting guaranteed to terminate?
2526     for( uint i=1; i<req(); ++i ) {// For all paths in
2527       Node *ii = in(i);
2528       // TOP inputs should not be counted as safe inputs because if the
2529       // Phi references itself through all other inputs then splitting the
2530       // Phi through memory merges would create a dead loop at a later stage.
2531       if (ii == top) {
2532         return nullptr; // Delay optimization until graph is cleaned.
2533       }
2534       if (ii->is_MergeMem()) {
2535         MergeMemNode* n = ii->as_MergeMem();
2536         merge_width = MAX2(merge_width, n->req());
2537         if (n->base_memory() == this) {
2538           split_always_terminates = true;
2539         }
2540       }
2541     }
2542 
2543     // There are cases with circular dependencies between bottom Phis
2544     // and MergeMems. Below is a minimal example.
2545     //
2546     //               +------------+
2547     //               |            |
2548     // (base_memory) v            |
2549     //              MergeMem      |
2550     //                 |          |
2551     //                 v          |
2552     //                Phi (this)  |
2553     //                 |          |
2554     //                 v          |
2555     //                Phi         |
2556     //                 |          |
2557     //                 +----------+
2558     //
2559     // Here, we cannot break the circularity through a self-loop as there
2560     // are two Phis involved. Repeatedly splitting the Phis through the
2561     // MergeMem leads to non-termination. We check for non-termination below.
2562     // Only check for non-termination if necessary.
2563     if (!split_always_terminates && adr_type() == TypePtr::BOTTOM &&
2564         merge_width > Compile::AliasIdxRaw) {
2565       split_always_terminates = is_split_through_mergemem_terminating();
2566     }
2567 
2568     if (merge_width > Compile::AliasIdxRaw) {
2569       // found at least one non-empty MergeMem
2570       const TypePtr* at = adr_type();
2571       if (at != TypePtr::BOTTOM) {
2572         // Patch the existing phi to select an input from the merge:
2573         // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2574         //     Phi:AT1(...m1...)
2575         int alias_idx = phase->C->get_alias_index(at);
2576         for (uint i=1; i<req(); ++i) {
2577           Node *ii = in(i);
2578           if (ii->is_MergeMem()) {
2579             MergeMemNode* n = ii->as_MergeMem();
2580             // compress paths and change unreachable cycles to TOP
2581             // Otherwise we could keep updating the input forever along a MergeMem cycle
2582             // Equivalent code is in MemNode::Ideal_common
2583             Node *m  = phase->transform(n);
2584             if (outcnt() == 0) {  // Above transform() may kill us!
2585               return top;
2586             }
2587             // If transformed to a MergeMem, get the desired slice
2588             // Otherwise the returned node represents memory for every slice
2589             Node *new_mem = (m->is_MergeMem()) ?
2590                              m->as_MergeMem()->memory_at(alias_idx) : m;
2591             // Update input if it is progress over what we have now
2592             if (new_mem != ii) {
2593               set_req_X(i, new_mem, phase->is_IterGVN());
2594               progress = this;
2595             }
2596           }
2597         }
2598       } else if (split_always_terminates) {
2599         // If all inputs reference this phi (directly or through data nodes) -
2600         // it is a dead loop.
2601         bool saw_safe_input = false;
2602         for (uint j = 1; j < req(); ++j) {
2603           Node* n = in(j);
2604           if (n->is_MergeMem()) {
2605             MergeMemNode* mm = n->as_MergeMem();
2606             if (mm->base_memory() == this || mm->base_memory() == mm->empty_memory()) {
2607               // Skip this input if it references back to this phi or if the memory path is dead
2608               continue;
2609             }
2610           }
2611           if (!is_unsafe_data_reference(n)) {
2612             saw_safe_input = true; // found safe input
2613             break;
2614           }
2615         }
2616         if (!saw_safe_input) {
2617           // There is a dead loop: All inputs are either dead or reference back to this phi
2618           return top;
2619         }
2620 
2621         // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
2622         //     MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
2623         PhaseIterGVN* igvn = phase->is_IterGVN();
2624         assert(igvn != nullptr, "sanity check");
2625         PhiNode* new_base = (PhiNode*) clone();
2626         // Must eagerly register phis, since they participate in loops.
2627         igvn->register_new_node_with_optimizer(new_base);
2628 
2629         MergeMemNode* result = MergeMemNode::make(new_base);
2630         for (uint i = 1; i < req(); ++i) {
2631           Node *ii = in(i);
2632           if (ii->is_MergeMem()) {
2633             MergeMemNode* n = ii->as_MergeMem();

2634             for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
2635               // If we have not seen this slice yet, make a phi for it.
2636               bool made_new_phi = false;
2637               if (mms.is_empty()) {
2638                 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
2639                 made_new_phi = true;
2640                 igvn->register_new_node_with_optimizer(new_phi);
2641                 mms.set_memory(new_phi);
2642               }
2643               Node* phi = mms.memory();
2644               assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
2645               phi->set_req_X(i, mms.memory2(), phase);
2646             }
2647           }
2648         }
2649         // Distribute all self-loops.
2650         { // (Extra braces to hide mms.)
2651           for (MergeMemStream mms(result); mms.next_non_empty(); ) {
2652             Node* phi = mms.memory();
2653             for (uint i = 1; i < req(); ++i) {

2658           }
2659         }
2660 
2661         // We could immediately transform the new Phi nodes here, but that can
2662         // result in creating an excessive number of new nodes within a single
2663         // IGVN iteration. We have put the Phi nodes on the IGVN worklist, so
2664         // they are transformed later on in any case.
2665 
2666         // Replace self with the result.
2667         return result;
2668       }
2669     }
2670     //
2671     // Other optimizations on the memory chain
2672     //
2673     const TypePtr* at = adr_type();
2674     for( uint i=1; i<req(); ++i ) {// For all paths in
2675       Node *ii = in(i);
2676       Node *new_in = MemNode::optimize_memory_chain(ii, at, nullptr, phase);
2677       if (ii != new_in ) {
2678         set_req_X(i, new_in, phase);
2679         progress = this;
2680       }
2681     }
2682   }
2683 
2684 #ifdef _LP64
2685   // Push DecodeN/DecodeNKlass down through the phi.
2686   // The rest of the phi graph is then transformed by splitting EncodeP nodes up through the phis.
2687   if ((UseCompressedOops || UseCompressedClassPointers) && can_reshape && progress == nullptr) {
2688     bool may_push = true;
2689     bool has_decodeN = false;
2690     bool is_decodeN = false;
2691     for (uint i=1; i<req(); ++i) {// For all paths in
2692       Node *ii = in(i);
2693       if (ii->is_DecodeNarrowPtr() && ii->bottom_type() == bottom_type()) {
2694         // Only optimize if a non-dead path exists.
2695         if (ii->in(1)->bottom_type() != Type::TOP) {
2696           has_decodeN = true;
2697           is_decodeN = ii->is_DecodeN();
2698         }

2726             if (is_decodeN) {
2727               new_ii = new EncodePNode(ii, narrow_t);
2728             } else {
2729               new_ii = new EncodePKlassNode(ii, narrow_t);
2730             }
2731             igvn->register_new_node_with_optimizer(new_ii);
2732           }
2733         }
2734         new_phi->set_req(i, new_ii);
2735       }
2736       igvn->register_new_node_with_optimizer(new_phi, this);
2737       if (is_decodeN) {
2738         progress = new DecodeNNode(new_phi, bottom_type());
2739       } else {
2740         progress = new DecodeNKlassNode(new_phi, bottom_type());
2741       }
2742     }
2743   }
2744 #endif
2745 
2746   // Try to convert a Phi with duplicated Convert-node inputs into a phi of the pre-conversion type and a single
2747   // convert node following the phi, to de-duplicate the convert nodes and compact the IR.
2748   if (can_reshape && progress == nullptr) {
2749     ConvertNode* convert = in(1)->isa_Convert();
2750     if (convert != nullptr) {
2751       int conv_op = convert->Opcode();
2752       bool ok = true;
2753 
2754       // Check the rest of the inputs
2755       for (uint i = 2; i < req(); i++) {
2756         // Make sure that all inputs are of the same type of convert node
2757         if (in(i)->Opcode() != conv_op) {
2758           ok = false;
2759           break;
2760         }
2761       }
2762 
2763       if (ok) {
2764         // Find the local bottom type to set as the type of the phi
2765         const Type* source_type = Type::get_const_basic_type(convert->in_type()->basic_type());

2788   // only after the non-bottom memory phi is processed by igvn, PhiNode::Identity doesn't run and the transformation
2789   // doesn't happen.
2790   // Look for non-bottom Phis that should be transformed and enqueue them for igvn so that PhiNode::Identity executes for
2791   // them.
2792   if (can_reshape && type() == Type::MEMORY && adr_type() == TypePtr::BOTTOM) {
2793     PhaseIterGVN* igvn = phase->is_IterGVN();
2794     uint phi_len = req();
2795     Node* phi_reg = region();
2796     for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
2797       Node* u = phi_reg->fast_out(i);
2798       assert(!u->is_Phi() || (u->in(0) == phi_reg && u->req() == phi_len), "broken Phi/Region subgraph");
2799       if (u->is_Phi() && u->as_Phi()->can_be_replaced_by(this)) {
2800         igvn->_worklist.push(u);
2801       }
2802     }
2803   }
2804 
2805   return progress;              // Return any progress
2806 }
2807 
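
To make the Phi-through-MergeMem split above concrete, here is a standalone toy model
(maps standing in for memory slices; none of this is HotSpot API) of how one bottom Phi
over two MergeMems becomes a MergeMem of per-slice Phis:

    #include <iostream>
    #include <map>
    #include <string>

    // A memory state maps alias indices to the node producing that slice;
    // index 0 plays the role of base_memory(), the fallback for absent slices.
    using SliceMap = std::map<int, std::string>;

    int main() {
      SliceMap path1 = {{0, "m0"}, {1, "m1"}};   // MergeMem(m0, 1: m1)
      SliceMap path2 = {{0, "k0"}, {2, "k2"}};   // MergeMem(k0, 2: k2)

      // One Phi per slice mentioned on any path; absent entries fall back
      // to that path's base memory, as the MergeMemStream loop does above.
      std::map<int, std::string> split;
      for (const auto& p : {path1, path2}) {
        for (const auto& entry : p) split[entry.first];
      }
      for (auto& kv : split) {
        int idx = kv.first;
        auto pick = [&](const SliceMap& p) {
          auto it = p.find(idx);
          return it != p.end() ? it->second : p.at(0);  // default to base
        };
        kv.second = "Phi(" + pick(path1) + ", " + pick(path2) + ")";
      }
      // Prints: 0: Phi(m0, k0)   1: Phi(m1, k0)   2: Phi(m0, k2)
      for (const auto& kv : split)
        std::cout << kv.first << ": " << kv.second << "\n";
    }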
2808 static int compare_types(const Type* const& e1, const Type* const& e2) {
2809   return (intptr_t)e1 - (intptr_t)e2;
2810 }
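
compare_types only needs some fixed, arbitrary order so that the same set of types always
sorts to the same sequence (and therefore hashes to the same TypeTuple); pointer identity
provides that within a single VM run. A standalone sketch of the same idea, assuming a toy
Tag type and using std::less so the pointer comparison stays well defined:

    #include <algorithm>
    #include <functional>
    #include <vector>

    struct Tag {};   // stand-in for const Type*

    // Sort by pointer identity: arbitrary, but deterministic within one run,
    // which is all the canonicalization above requires.
    void sort_by_identity(std::vector<const Tag*>& types) {
      std::sort(types.begin(), types.end(), std::less<const Tag*>());
    }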
2811 
2812 // Collect types at casts that are going to be eliminated at that Phi and store them in a TypeTuple.
2813 // Sort the types using an arbitrary order so a given list of types always hashes to the same TypeTuple (and TypeTuple
2814 // pointer comparison is enough to tell whether two lists of types are the same or not)
2815 const TypeTuple* PhiNode::collect_types(PhaseGVN* phase) const {
2816   const Node* region = in(0);
2817   const Type* phi_type = bottom_type();
2818   ResourceMark rm;
2819   GrowableArray<const Type*> types;
2820   for (uint i = 1; i < req(); i++) {
2821     if (region->in(i) == nullptr || phase->type(region->in(i)) == Type::TOP) {
2822       continue;
2823     }
2824     Node* in = Node::in(i);
2825     const Type* t = phase->type(in);
2826     if (in == nullptr || in == this || t == Type::TOP) {
2827       continue;

3177 #ifndef PRODUCT
3178 void CatchProjNode::dump_spec(outputStream *st) const {
3179   ProjNode::dump_spec(st);
3180   st->print("@bci %d ",_handler_bci);
3181 }
3182 #endif
3183 
3184 //=============================================================================
3185 //------------------------------Identity---------------------------------------
3186 // Check for CreateEx being Identity.
3187 Node* CreateExNode::Identity(PhaseGVN* phase) {
3188   if( phase->type(in(1)) == Type::TOP ) return in(1);
3189   if( phase->type(in(0)) == Type::TOP ) return in(0);
3190   if (phase->type(in(0)->in(0)) == Type::TOP) {
3191     assert(in(0)->is_CatchProj(), "control is CatchProj");
3192     return phase->C->top(); // dead code
3193   }
3194   // We only come from CatchProj, unless the CatchProj goes away.
3195   // If the CatchProj is optimized away, then we just carry the
3196   // exception oop through.
3197   CallNode *call = in(1)->in(0)->as_Call();
3198 
3199   return (in(0)->is_CatchProj() && in(0)->in(0)->is_Catch() &&
3200           in(0)->in(0)->in(1) == in(1)) ? this : call->in(TypeFunc::Parms);
3201 }
3202 
3203 //=============================================================================
3204 //------------------------------Value------------------------------------------
3205 // Check for being unreachable.
3206 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
3207   if (!in(0) || in(0)->is_top()) return Type::TOP;
3208   return bottom_type();
3209 }
3210 
3211 //------------------------------Ideal------------------------------------------
3212 // Check for no longer being part of a loop
3213 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3214   if (can_reshape && !in(0)->is_Region()) {
3215     // Dead code elimination can sometimes delete this projection so
3216     // if it's not there, there's nothing to do.

src/hotspot/share/opto/cfgnode.cpp (patched version)

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "gc/shared/barrierSet.hpp"
  26 #include "gc/shared/c2/barrierSetC2.hpp"
  27 #include "memory/allocation.inline.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "oops/objArrayKlass.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/castnode.hpp"
  32 #include "opto/cfgnode.hpp"
  33 #include "opto/connode.hpp"
  34 #include "opto/convertnode.hpp"
  35 #include "opto/inlinetypenode.hpp"
  36 #include "opto/loopnode.hpp"
  37 #include "opto/machnode.hpp"
  38 #include "opto/movenode.hpp"
  39 #include "opto/mulnode.hpp"
  40 #include "opto/narrowptrnode.hpp"
  41 #include "opto/phaseX.hpp"
  42 #include "opto/regalloc.hpp"
  43 #include "opto/regmask.hpp"
  44 #include "opto/runtime.hpp"
  45 #include "opto/subnode.hpp"
  46 #include "opto/vectornode.hpp"
  47 #include "utilities/vmError.hpp"
  48 
  49 // Portions of code courtesy of Clifford Click
  50 
  51 // Optimization - Graph Style
  52 
  53 //=============================================================================
  54 //------------------------------Value------------------------------------------
  55 // Compute the type of the RegionNode.

 504   if (left_path == nullptr || right_path == nullptr) {
 505     return false;
 506   }
 507   Node* diamond_if = left_path->in(0);
 508   if (diamond_if == nullptr || !diamond_if->is_If() || diamond_if != right_path->in(0)) {
 509     // Not an IfNode merging a diamond or TOP.
 510     return false;
 511   }
 512 
 513   // Check for a proper bool/cmp
 514   const Node* bol = diamond_if->in(1);
 515   if (!bol->is_Bool()) {
 516     return false;
 517   }
 518   const Node* cmp = bol->in(1);
 519   if (!cmp->is_Cmp()) {
 520     return false;
 521   }
 522   return true;
 523 }
 524 
 525 //------------------------------Ideal------------------------------------------
 526 // Return a node which is more "ideal" than the current node.  Must preserve
 527 // the CFG, but we can still strip out dead paths.
 528 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 529   if( !can_reshape && !in(0) ) return nullptr;     // Already degraded to a Copy
 530   assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
 531 
 532   // Check for a RegionNode with no Phi users whose inputs come from the two
 533   // arms of the same If.  If found, then the control-flow split is useless.
 534   bool has_phis = false;
 535   if (can_reshape) {            // Need DU info to check for Phi users
 536     try_clean_mem_phis(phase->is_IterGVN());
 537     has_phis = (has_phi() != nullptr);       // Cache result
 538 
 539     if (!has_phis) {            // No Phi users?  Nothing merging?
 540       for (uint i = 1; i < req()-1; i++) {
 541         Node *if1 = in(i);
 542         if( !if1 ) continue;
 543         Node *iff = if1->in(0);
 544         if( !iff || !iff->is_If() ) continue;

 951   if (iff1 == iff2) {
 952     igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
 953     igvn->replace_input_of(region, idx1, iff1->in(0));
 954     igvn->replace_input_of(region, idx2, igvn->C->top());
 955     return (region == this); // Remove useless if (both projections map to the same control/value)
 956   }
 957   BoolNode* bol1 = iff1->in(1)->isa_Bool();
 958   BoolNode* bol2 = iff2->in(1)->isa_Bool();
 959   if (bol1 == nullptr || bol2 == nullptr) {
 960     return false; // No bool inputs found
 961   }
 962   Node* cmp1 = bol1->in(1);
 963   Node* cmp2 = bol2->in(1);
 964   bool commute = false;
 965   if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
 966     return false; // No comparison
 967   } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
 968              cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
 969              cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
 970              cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
 971              cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck() ||
 972              cmp1->is_FlatArrayCheck() || cmp2->is_FlatArrayCheck()) {
 973     // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
 974     // SubTypeCheck and FlatArrayCheck are not commutative
 975     return false;
 976   } else if (cmp1 != cmp2) {
 977     if (cmp1->in(1) == cmp2->in(2) &&
 978         cmp1->in(2) == cmp2->in(1)) {
 979       commute = true; // Same but swapped inputs, commute the test
 980     } else {
 981       return false; // Ifs are not comparing the same values
 982     }
 983   }
 984   proj1 = proj1->other_if_proj();
 985   proj2 = proj2->other_if_proj();
 986   if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
 987          proj2->unique_ctrl_out_or_null() == this) ||
 988         (proj2->unique_ctrl_out_or_null() == iff1 &&
 989          proj1->unique_ctrl_out_or_null() == this))) {
 990     return false; // Ifs are not connected through other projs
 991   }
 992   // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged

1031     st->print("#reducible ");
1032     break;
1033   case RegionNode::LoopStatus::NeverIrreducibleEntry:
1034     break; // nothing
1035   }
1036 }
1037 #endif
1038 
1039 // Find the one non-null required input.  RegionNode only
1040 Node *Node::nonnull_req() const {
1041   assert( is_Region(), "" );
1042   for( uint i = 1; i < _cnt; i++ )
1043     if( in(i) )
1044       return in(i);
1045   ShouldNotReachHere();
1046   return nullptr;
1047 }
1048 
1049 
1050 //=============================================================================
1051 // note that these functions assume that the _adr_type field is flat
1052 uint PhiNode::hash() const {
1053   const Type* at = _adr_type;
1054   return TypeNode::hash() + (at ? at->hash() : 0);
1055 }
1056 bool PhiNode::cmp( const Node &n ) const {
1057   return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
1058 }
1059 static inline
1060 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
1061   if (at == nullptr || at == TypePtr::BOTTOM)  return at;
1062   return Compile::current()->alias_type(at)->adr_type();
1063 }
1064 
1065 //----------------------------make---------------------------------------------
1066 // create a new phi with edges matching r and set (initially) to x
1067 PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
1068   uint preds = r->req();   // Number of predecessor paths
1069   assert(t != Type::MEMORY || at == flatten_phi_adr_type(at) || (flatten_phi_adr_type(at) == TypeAryPtr::INLINES && Compile::current()->flat_accesses_share_alias()), "flatten at");
1070   PhiNode* p = new PhiNode(r, t, at);
1071   for (uint j = 1; j < preds; j++) {
1072     // Fill in all inputs, except those which the region does not yet have
1073     if (r->in(j) != nullptr)
1074       p->init_req(j, x);
1075   }
1076   return p;
1077 }
1078 PhiNode* PhiNode::make(Node* r, Node* x) {
1079   const Type*    t  = x->bottom_type();
1080   const TypePtr* at = nullptr;
1081   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
1082   return make(r, x, t, at);
1083 }
1084 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
1085   const Type*    t  = x->bottom_type();
1086   const TypePtr* at = nullptr;
1087   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
1088   return new PhiNode(r, t, at);
1089 }

1178       np->as_Phi()->verify_adr_type(visited, at);
1179     } else if (n->bottom_type() == Type::TOP
1180                || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
1181       // ignore top inputs
1182     } else {
1183       const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
1184       // recheck phi/non-phi consistency at leaves:
1185       assert((nat != nullptr) == (at != nullptr), "");
1186       assert(nat == at || nat == TypePtr::BOTTOM,
1187              "adr_type must be consistent at leaves of phi nest");
1188     }
1189   }
1190 }
1191 
1192 // Verify a whole nest of phis rooted at this one.
1193 void PhiNode::verify_adr_type(bool recursive) const {
1194   if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
1195   if (Node::in_dump())               return;  // muzzle asserts when printing
1196 
1197   assert((_type == Type::MEMORY) == (_adr_type != nullptr), "adr_type for memory phis only");
1198   // Flat array elements shouldn't get their own memory slice until flat_accesses_share_alias is cleared.
1199   // It could be that the graph has no loads/stores and flat_accesses_share_alias is never cleared. EA could still
1200   // create per-element Phis, but that wouldn't be a problem as there are no memory accesses for that array.
1201   assert(_adr_type == nullptr || _adr_type->isa_aryptr() == nullptr ||
1202          _adr_type->is_aryptr()->is_known_instance() ||
1203          !_adr_type->is_aryptr()->is_flat() ||
1204          !Compile::current()->flat_accesses_share_alias() ||
1205          _adr_type == TypeAryPtr::INLINES, "flat array element shouldn't get its own slice yet");
1206 
1207   if (!VerifyAliases)       return;  // verify thoroughly only if requested
1208 
1209   assert(_adr_type == flatten_phi_adr_type(_adr_type),
1210          "Phi::adr_type must be pre-normalized");
1211 
1212   if (recursive) {
1213     VectorSet visited;
1214     verify_adr_type(visited, _adr_type);
1215   }
1216 }
1217 #endif
1218 
1219 
1220 //------------------------------Value------------------------------------------
1221 // Compute the type of the PhiNode
1222 const Type* PhiNode::Value(PhaseGVN* phase) const {
1223   Node *r = in(0);              // RegionNode
1224   if( !r )                      // Copy or dead
1225     return in(1) ? phase->type(in(1)) : Type::TOP;

1469   assert(req() == 3, "same as region");
1470   RegionNode* region = in(0)->as_Region();
1471   for (uint i = 1; i < 3; i++) {
1472     Node* phi_input = in(i);
1473     if (phi_input != nullptr && phi_input->is_MergeMem() && region->in(i)->outcnt() == 1) {
1474       // Nothing is control-dependent on path #i except the region itself.
1475       MergeMemNode* merge_mem = phi_input->as_MergeMem();
1476       uint j = 3 - i;
1477       Node* other_phi_input = in(j);
1478       if (other_phi_input != nullptr && other_phi_input == merge_mem->base_memory() && !is_data_loop(region, phi_input, igvn)) {
1479         // merge_mem is a successor memory to other_phi_input, and is not pinned inside the diamond, so push it out.
1480         // Only proceed if the transformation doesn't create a data loop
1481         // This will allow the diamond to collapse completely if there are no other phis left.
1482         igvn->replace_node(this, merge_mem);
1483         return true;
1484       }
1485     }
1486   }
1487   return false;
1488 }
1489 
1490 //----------------------------check_cmove_id-----------------------------------
1491 // Check for CMove'ing a constant after comparing against the constant.
1492 // Happens all the time now, since if we compare equality vs a constant in
1493 // the parser, we "know" the variable is constant on one path and we force
1494 // it.  Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
1495 // conditional move: "x = (x==0)?0:x;".  Yucko.  This fix is slightly more
1496 // general in that we don't need constants.  Since CMove's are only inserted
1497 // in very special circumstances, we do it here on generic Phi's.
1498 Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) {
1499   assert(true_path !=0, "only diamond shape graph expected");
1500 
1501   // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1502   // phi->region->if_proj->ifnode->bool->cmp
1503   Node*     region = in(0);
1504   Node*     iff    = region->in(1)->in(0);
1505   BoolNode* b      = iff->in(1)->as_Bool();
1506   Node*     cmp    = b->in(1);
1507   Node*     tval   = in(true_path);
1508   Node*     fval   = in(3-true_path);
1509   Node*     id     = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b);

1524   }
1525 
1526   return id;
1527 }
1528 
1529 //------------------------------Identity---------------------------------------
1530 // Check for Region being Identity.
1531 Node* PhiNode::Identity(PhaseGVN* phase) {
1532   if (must_wait_for_region_in_irreducible_loop(phase)) {
1533     return this;
1534   }
1535   // Check for no merging going on
1536   // (There used to be special-case code here when this->region->is_Loop.
1537   // It would check for a tributary phi on the backedge that fed the main phi
1538   // trivially, perhaps through a single cast.  The unique_input method
1539   // does all this and more, by reducing such tributaries to 'this'.)
1540   Node* uin = unique_input(phase, false);
1541   if (uin != nullptr) {
1542     return uin;
1543   }
1544   uin = unique_constant_input_recursive(phase);
1545   if (uin != nullptr) {
1546     return uin;
1547   }
1548 
1549   int true_path = is_diamond_phi();
1550   // Delay CMove'ing identity if Ideal has not had the chance to handle unsafe cases, yet.
1551   if (true_path != 0 && !(phase->is_IterGVN() && wait_for_region_igvn(phase))) {
1552     Node* id = is_cmove_id(phase, true_path);
1553     if (id != nullptr) {
1554       return id;
1555     }
1556   }
1557 
1558   // Looking for phis with identical inputs.  If we find one that has
1559   // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
1560   if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
1561       TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
1562     uint phi_len = req();
1563     Node* phi_reg = region();
1564     for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
1565       Node* u = phi_reg->fast_out(i);
1566       assert(!u->is_Phi() || u->in(0) == phi_reg, "broken Phi/Region subgraph");
1567       if (u->is_Phi() && u->req() == phi_len && can_be_replaced_by(u->as_Phi())) {

1618     }
1619     // Check for a unique input (maybe uncasted)
1620     if (input == nullptr) {
1621       input = un;
1622     } else if (input != un) {
1623       input = NodeSentinel; // no unique input
1624     }
1625   }
1626   if (input == nullptr) {
1627     return phase->C->top();        // no inputs
1628   }
1629 
1630   if (input != NodeSentinel) {
1631     return input;           // one unique direct input
1632   }
1633 
1634   // Nothing.
1635   return nullptr;
1636 }
1637 
1638 // Find the unique input, looking recursively through input Phis
1639 Node* PhiNode::unique_constant_input_recursive(PhaseGVN* phase) {
1640   if (!phase->is_IterGVN()) {
1641     return nullptr;
1642   }
1643 
1644   ResourceMark rm;
1645   Node* unique = nullptr;
1646   Unique_Node_List visited;
1647   visited.push(this);
1648 
1649   for (uint visited_idx = 0; visited_idx < visited.size(); visited_idx++) {
1650     Node* current = visited.at(visited_idx);
1651     for (uint i = 1; i < current->req(); i++) {
1652       Node* phi_in = current->in(i);
1653       if (phi_in == nullptr) {
1654         continue;
1655       }
1656 
1657       if (phi_in->is_Phi()) {
1658         visited.push(phi_in);
1659       } else {
1660         if (unique == nullptr) {
1661           if (!phi_in->is_Con()) {
1662             return nullptr;
1663           }
1664           unique = phi_in;
1665         } else if (unique != phi_in) {
1666           return nullptr;
1667         }
1668       }
1669     }
1670   }
1671   return unique;
1672 }
1673 
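
The new helper generalizes unique_input for constants: it walks through arbitrarily nested
phis with a worklist and succeeds only if every non-phi input it ever reaches is one and the
same constant node. A standalone sketch of that walk, assuming a toy graph (a set models the
deduplication Unique_Node_List provides, which is what makes phi cycles safe):

    #include <unordered_set>
    #include <vector>

    struct TNode {
      bool is_phi;
      bool is_con;                  // is this a constant node?
      std::vector<TNode*> in;       // data inputs (control input omitted)
    };

    // Returns the single constant feeding `root` through any nesting of phis,
    // or nullptr if the inputs are not all the same constant.
    static TNode* unique_constant_through_phis(TNode* root) {
      TNode* unique = nullptr;
      std::vector<TNode*> worklist{root};
      std::unordered_set<TNode*> seen{root};
      for (size_t idx = 0; idx < worklist.size(); idx++) {  // list grows as we walk
        for (TNode* in : worklist[idx]->in) {
          if (in == nullptr) continue;
          if (in->is_phi) {
            if (seen.insert(in).second) worklist.push_back(in);  // dedup: safe on cycles
          } else if (unique == nullptr) {
            if (!in->is_con) return nullptr;  // a non-constant input ends the search
            unique = in;
          } else if (unique != in) {
            return nullptr;                   // two different inputs
          }
        }
      }
      return unique;
    }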
1674 //------------------------------is_x2logic-------------------------------------
1675 // Check for simple convert-to-boolean pattern
1676 // If:(C Bool) Region:(IfF IfT) Phi:(Region 0 1)
1677 // Convert the Phi to a Conv2B.
1678 static Node *is_x2logic( PhaseGVN *phase, PhiNode *phi, int true_path ) {
1679   assert(true_path !=0, "only diamond shape graph expected");
1680 
1681   // If we're late in the optimization process, we may have already expanded Conv2B nodes
1682   if (phase->C->post_loop_opts_phase() && !Matcher::match_rule_supported(Op_Conv2B)) {
1683     return nullptr;
1684   }
1685 
1686   // Convert the true/false index into an expected 0/1 return.
1687   // Map 2->0 and 1->1.
1688   int flipped = 2-true_path;
1689 
1690   // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1691   // phi->region->if_proj->ifnode->bool->cmp
1692   Node *region = phi->in(0);
1693   Node *iff = region->in(1)->in(0);

2121 
2122     if (rc->in(0)->in(1) == nullptr || !rc->in(0)->in(1)->is_Bool()) { continue; }
2123     if (worklist.member(rc->in(0)->in(1))) {
2124       delay = true;
2125       break;
2126     }
2127 
2128     if (rc->in(0)->in(1)->in(1) == nullptr || !rc->in(0)->in(1)->in(1)->is_Cmp()) { continue; }
2129     if (worklist.member(rc->in(0)->in(1)->in(1))) {
2130       delay = true;
2131       break;
2132     }
2133   }
2134 
2135   if (delay) {
2136     worklist.push(this);
2137   }
2138   return delay;
2139 }
2140 
2141 // Push inline type input nodes (and null) down through the phi recursively (can handle data loops).
2142 InlineTypeNode* PhiNode::push_inline_types_down(PhaseGVN* phase, bool can_reshape, ciInlineKlass* inline_klass) {
2143   assert(inline_klass != nullptr, "must be");
2144   InlineTypeNode* vt = InlineTypeNode::make_null(*phase, inline_klass, /* transform = */ false)->clone_with_phis(phase, in(0), nullptr, !_type->maybe_null(), true);
2145   if (can_reshape) {
2146     // Replace phi right away to be able to use the inline
2147     // type node when reaching the phi again through data loops.
2148     PhaseIterGVN* igvn = phase->is_IterGVN();
2149     for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
2150       Node* u = fast_out(i);
2151       igvn->rehash_node_delayed(u);
2152       imax -= u->replace_edge(this, vt);
2153       --i;
2154     }
2155     igvn->rehash_node_delayed(this);
2156     assert(outcnt() == 0, "should be dead now");
2157   }
2158   ResourceMark rm;
2159   Node_List casts;
2160   for (uint i = 1; i < req(); ++i) {
2161     Node* n = in(i);
2162     while (n->is_ConstraintCast()) {
2163       casts.push(n);
2164       n = n->in(1);
2165     }
2166     if (phase->type(n)->is_zero_type()) {
2167       n = InlineTypeNode::make_null(*phase, inline_klass);
2168     } else if (n->is_Phi()) {
2169       assert(can_reshape, "can only handle phis during IGVN");
2170       n = phase->transform(n->as_Phi()->push_inline_types_down(phase, can_reshape, inline_klass));
2171     }
2172     while (casts.size() != 0) {
2173       // Push the cast(s) through the InlineTypeNode
2174       // TODO 8302217 Can we avoid cloning? See InlineTypeNode::clone_if_required
2175       Node* cast = casts.pop()->clone();
2176       cast->set_req_X(1, n->as_InlineType()->get_oop(), phase);
2177       n = n->clone();
2178       n->as_InlineType()->set_oop(*phase, phase->transform(cast));
2179       n = phase->transform(n);
2180       if (n->is_top()) {
2181         break;
2182       }
2183     }
2184     bool transform = !can_reshape && (i == (req()-1)); // Transform phis on last merge
2185     assert(n->is_top() || n->is_InlineType(), "Only InlineType or top at this point.");
2186     if (n->is_InlineType()) {
2187       vt->merge_with(phase, n->as_InlineType(), i, transform);
2188     } // else nothing to do: phis above vt created by clone_with_phis are initialized to top already.
2189   }
2190   return vt;
2191 }
2192 
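
Loosely, what push_inline_types_down achieves is scalarization across the merge: instead of
one Phi over inline-type oops, each field ends up with its own Phi inside the InlineTypeNode.
A toy analogy with plain structs (illustrative only; `phi` stands in for a two-input C2 Phi):

    #include <cstdio>

    struct Pair { int a; int b; };   // stand-in for a two-field inline type

    // Stand-in for a two-path Phi: selects a value per control path.
    static int phi(bool took_path1, int x, int y) { return took_path1 ? x : y; }

    int main() {
      Pair p1{1, 2}, p2{3, 4};
      bool took_path1 = true;
      // Scalarized merge: one phi per field instead of one phi over whole values.
      Pair merged{phi(took_path1, p1.a, p2.a), phi(took_path1, p1.b, p2.b)};
      std::printf("%d %d\n", merged.a, merged.b);   // prints "1 2"
    }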
2193 // If the Phi's Region is in an irreducible loop, and the Region
2194 // has had an input removed, but not yet transformed, it could be
2195 // that the Region (and this Phi) are not reachable from Root.
2196 // If we allow the Phi to collapse before the Region, this may create
2197 // a dead data loop. Wait for the Region to check for reachability,
2198 // and potentially remove the dead code.
2199 bool PhiNode::must_wait_for_region_in_irreducible_loop(PhaseGVN* phase) const {
2200   RegionNode* region = in(0)->as_Region();
2201   if (region->loop_status() == RegionNode::LoopStatus::MaybeIrreducibleEntry) {
2202     Node* top = phase->C->top();
2203     for (uint j = 1; j < req(); j++) {
2204       Node* rc = region->in(j); // for each control input
2205       if (rc == nullptr || phase->type(rc) == Type::TOP) {
2206         // Region is missing a control input
2207         Node* n = in(j);
2208         if (n != nullptr && n != top) {
2209           // Phi still has its input, so region just lost its input
2210           return true;
2211         }
2212       }

2609     //                Phi (this)   |
2610     //                 |           |
2611     //                 +-----------+
2612     //
2613     // Generally, such circularity raises non-termination issues
2614     // (see comment further below). However, if there is a direct loop to self,
2615     // splitting the Phi through the MergeMem will result in the below.
2616     //
2617     //               +---+
2618     //               |   |
2619     //               v   |
2620     //              Phi  |
2621     //               |\  |
2622     //               | +-+
2623     // (base_memory) v
2624     //              MergeMem
2625     //
2626     // This split breaks the circularity and consequently does not lead to
2627     // non-termination.
2628     uint merge_width = 0;
2629     // TODO revisit this with JDK-8247216
2630     bool mergemem_only = true;
2631     bool split_always_terminates = false; // Is splitting guaranteed to terminate?
2632     for( uint i=1; i<req(); ++i ) {// For all paths in
2633       Node *ii = in(i);
2634       // TOP inputs should not be counted as safe inputs because if the
2635       // Phi references itself through all other inputs then splitting the
2636       // Phi through memory merges would create a dead loop at a later stage.
2637       if (ii == top) {
2638         return nullptr; // Delay optimization until graph is cleaned.
2639       }
2640       if (ii->is_MergeMem()) {
2641         MergeMemNode* n = ii->as_MergeMem();
2642         merge_width = MAX2(merge_width, n->req());
2643         if (n->base_memory() == this) {
2644           split_always_terminates = true;
2645         }
2646       } else {
2647         mergemem_only = false;
2648       }
2649     }
2650 
2651     // There are cases with circular dependencies between bottom Phis
2652     // and MergeMems. Below is a minimal example.
2653     //
2654     //               +------------+
2655     //               |            |
2656     // (base_memory) v            |
2657     //              MergeMem      |
2658     //                 |          |
2659     //                 v          |
2660     //                Phi (this)  |
2661     //                 |          |
2662     //                 v          |
2663     //                Phi         |
2664     //                 |          |
2665     //                 +----------+
2666     //
2667     // Here, we cannot break the circularity through a self-loop as there
2668     // are two Phis involved. Repeatedly splitting the Phis through the
2669     // MergeMem leads to non-termination. We check for non-termination below.
2670     // Only check for non-termination if necessary.
2671     if (!mergemem_only && !split_always_terminates && adr_type() == TypePtr::BOTTOM &&
2672         merge_width > Compile::AliasIdxRaw) {
2673       split_always_terminates = is_split_through_mergemem_terminating();
2674     }
2675 
2676     if (merge_width > Compile::AliasIdxRaw) {
2677       // found at least one non-empty MergeMem
2678       const TypePtr* at = adr_type();
2679       if (at != TypePtr::BOTTOM) {
2680         // Patch the existing phi to select an input from the merge:
2681         // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2682         //     Phi:AT1(...m1...)
2683         int alias_idx = phase->C->get_alias_index(at);
2684         for (uint i=1; i<req(); ++i) {
2685           Node *ii = in(i);
2686           if (ii->is_MergeMem()) {
2687             MergeMemNode* n = ii->as_MergeMem();
2688             // compress paths and change unreachable cycles to TOP
2689             // If not, we can update the input infinitely along a MergeMem cycle
2690             // Equivalent code is in MemNode::Ideal_common
2691             Node *m  = phase->transform(n);
2692             if (outcnt() == 0) {  // Above transform() may kill us!
2693               return top;
2694             }
2695             // If transformed to a MergeMem, get the desired slice
2696             // Otherwise the returned node represents memory for every slice
2697             Node *new_mem = (m->is_MergeMem()) ?
2698                              m->as_MergeMem()->memory_at(alias_idx) : m;
2699             // Update input if it is progress over what we have now
2700             if (new_mem != ii) {
2701               set_req_X(i, new_mem, phase->is_IterGVN());
2702               progress = this;
2703             }
2704           }
2705         }
2706       } else if (mergemem_only || split_always_terminates) {
2707         // If all inputs reference this phi (directly or through data nodes) -
2708         // it is a dead loop.
2709         bool saw_safe_input = false;
2710         for (uint j = 1; j < req(); ++j) {
2711           Node* n = in(j);
2712           if (n->is_MergeMem()) {
2713             MergeMemNode* mm = n->as_MergeMem();
2714             if (mm->base_memory() == this || mm->base_memory() == mm->empty_memory()) {
2715               // Skip this input if it references back to this phi or if the memory path is dead
2716               continue;
2717             }
2718           }
2719           if (!is_unsafe_data_reference(n)) {
2720             saw_safe_input = true; // found safe input
2721             break;
2722           }
2723         }
2724         if (!saw_safe_input) {
2725           // There is a dead loop: All inputs are either dead or reference back to this phi
2726           return top;
2727         }
2728 
2729         // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
2730         //     MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
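         // (In this sketch, each per-slice phi merges, for every control path
         //  i, the memory that the i-th input MergeMem held for that slice;
         //  the MergeMemStream loop below builds those per-slice phis.)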
2731         PhaseIterGVN* igvn = phase->is_IterGVN();
2732         assert(igvn != nullptr, "sanity check");
2733         PhiNode* new_base = (PhiNode*) clone();
2734         // Must eagerly register phis, since they participate in loops.
2735         igvn->register_new_node_with_optimizer(new_base);
2736 
2737         MergeMemNode* result = MergeMemNode::make(new_base);
2738         for (uint i = 1; i < req(); ++i) {
2739           Node *ii = in(i);
2740           if (ii->is_MergeMem()) {
2741             MergeMemNode* n = ii->as_MergeMem();
2742             if (igvn) {
2743               // TODO revisit this with JDK-8247216
2744               // Put 'n' on the worklist because it might be modified by MergeMemStream::iteration_setup
2745               igvn->_worklist.push(n);
2746             }
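             // Walk 'result' and 'n' in lockstep, visiting every slice that is
             // non-empty in either merge.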
2747             for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
2748               // If we have not seen this slice yet, make a phi for it.
2749               bool made_new_phi = false;
2750               if (mms.is_empty()) {
2751                 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
2752                 made_new_phi = true;
2753                 igvn->register_new_node_with_optimizer(new_phi);
2754                 mms.set_memory(new_phi);
2755               }
2756               Node* phi = mms.memory();
2757               assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
2758               phi->set_req_X(i, mms.memory2(), phase);
2759             }
2760           }
2761         }
2762         // Distribute all self-loops.
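         // (That is: any input of a per-slice phi that still points at the old
         //  bottom phi 'this' becomes a self-loop on that per-slice phi.)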
2763         { // (Extra braces to hide mms.)
2764           for (MergeMemStream mms(result); mms.next_non_empty(); ) {
2765             Node* phi = mms.memory();
2766             for (uint i = 1; i < req(); ++i) {

2771           }
2772         }
2773 
2774         // We could immediately transform the new Phi nodes here, but that can
2775         // result in creating an excessive number of new nodes within a single
2776         // IGVN iteration. We have put the Phi nodes on the IGVN worklist, so
2777         // they are transformed later on in any case.
2778 
2779         // Replace self with the result.
2780         return result;
2781       }
2782     }
2783     //
2784     // Other optimizations on the memory chain
2785     //
2786     const TypePtr* at = adr_type();
2787     for( uint i=1; i<req(); ++i ) {// For all paths in
2788       Node *ii = in(i);
2789       Node *new_in = MemNode::optimize_memory_chain(ii, at, nullptr, phase);
2790       if (ii != new_in ) {
2791         set_req_X(i, new_in, phase->is_IterGVN());
2792         progress = this;
2793       }
2794     }
2795   }
2796 
2797 #ifdef _LP64
2798   // Push DecodeN/DecodeNKlass down through the phi.
2799   // The rest of the phi graph is then transformed by splitting EncodeP nodes up through the phis.
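   // Illustrative shape (DecodeN standing in for either narrow-pointer decode):
   //   Phi(DecodeN(x), DecodeN(y))  =>  DecodeN(Phi(x, y))
   // Inputs that are not decodes get a matching EncodeP/EncodePKlass inserted.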
2800   if ((UseCompressedOops || UseCompressedClassPointers) && can_reshape && progress == nullptr) {
2801     bool may_push = true;
2802     bool has_decodeN = false;
2803     bool is_decodeN = false;
2804     for (uint i=1; i<req(); ++i) {// For all paths in
2805       Node *ii = in(i);
2806       if (ii->is_DecodeNarrowPtr() && ii->bottom_type() == bottom_type()) {
2807         // Only optimize if a non-dead path exists.
2808         if (ii->in(1)->bottom_type() != Type::TOP) {
2809           has_decodeN = true;
2810           is_decodeN = ii->is_DecodeN();
2811         }

2839             if (is_decodeN) {
2840               new_ii = new EncodePNode(ii, narrow_t);
2841             } else {
2842               new_ii = new EncodePKlassNode(ii, narrow_t);
2843             }
2844             igvn->register_new_node_with_optimizer(new_ii);
2845           }
2846         }
2847         new_phi->set_req(i, new_ii);
2848       }
2849       igvn->register_new_node_with_optimizer(new_phi, this);
2850       if (is_decodeN) {
2851         progress = new DecodeNNode(new_phi, bottom_type());
2852       } else {
2853         progress = new DecodeNKlassNode(new_phi, bottom_type());
2854       }
2855     }
2856   }
2857 #endif
2858 
2859   Node* inline_type = try_push_inline_types_down(phase, can_reshape);
2860   if (inline_type != this) {
2861     return inline_type;
2862   }
2863 
2864   // Try to convert a Phi whose inputs are duplicated convert nodes into a phi of the pre-conversion type followed by
2865   // a single convert node, to de-duplicate the converts and compact the IR.
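   // Illustrative shape, with ConvI2L standing in for any Convert opcode:
   //   Phi(ConvI2L(a), ConvI2L(b))  =>  ConvI2L(Phi(a, b))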
2866   if (can_reshape && progress == nullptr) {
2867     ConvertNode* convert = in(1)->isa_Convert();
2868     if (convert != nullptr) {
2869       int conv_op = convert->Opcode();
2870       bool ok = true;
2871 
2872       // Check the rest of the inputs
2873       for (uint i = 2; i < req(); i++) {
2874         // Make sure that all inputs are of the same type of convert node
2875         if (in(i)->Opcode() != conv_op) {
2876           ok = false;
2877           break;
2878         }
2879       }
2880 
2881       if (ok) {
2882         // Find the local bottom type to set as the type of the phi
2883         const Type* source_type = Type::get_const_basic_type(convert->in_type()->basic_type());

2906   // only after the non-bottom memory phi is processed by igvn, PhiNode::Identity doesn't run and the transformation
2907   // doesn't happen.
2908   // Look for non-bottom Phis that should be transformed and enqueue them for igvn so that PhiNode::Identity executes for
2909   // them.
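   // (Roughly: a narrower Phi:AT(...) that merely duplicates this bottom phi
   //  on slice AT can be replaced by this phi via PhiNode::Identity, but that
   //  only happens if igvn revisits it; hence the worklist push below.)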
2910   if (can_reshape && type() == Type::MEMORY && adr_type() == TypePtr::BOTTOM) {
2911     PhaseIterGVN* igvn = phase->is_IterGVN();
2912     uint phi_len = req();
2913     Node* phi_reg = region();
2914     for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
2915       Node* u = phi_reg->fast_out(i);
2916       assert(!u->is_Phi() || (u->in(0) == phi_reg && u->req() == phi_len), "broken Phi/Region subgraph");
2917       if (u->is_Phi() && u->as_Phi()->can_be_replaced_by(this)) {
2918         igvn->_worklist.push(u);
2919       }
2920     }
2921   }
2922 
2923   return progress;              // Return any progress
2924 }
2925 
2926 // Check recursively if inputs are either an inline type, constant null
2927 // or another Phi (including self references through data loops). If so,
2928 // push the inline types down through the phis to enable folding of loads.
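// Illustrative shape (field name made up): pushing the inline types down turns
//   Phi(InlineType(f: a), InlineType(f: b))
// into roughly
//   InlineType(f: Phi(a, b))
// so a load of field f from the merged value can fold.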
2929 Node* PhiNode::try_push_inline_types_down(PhaseGVN* phase, const bool can_reshape) {
2930   if (!can_be_inline_type()) {
2931     return this;
2932   }
2933 
2934   ciInlineKlass* inline_klass;
2935   if (can_push_inline_types_down(phase, can_reshape, inline_klass)) {
2936     assert(inline_klass != nullptr, "must be");
2937     return push_inline_types_down(phase, can_reshape, inline_klass);
2938   }
2939   return this;
2940 }
2941 
2942 bool PhiNode::can_push_inline_types_down(PhaseGVN* phase, const bool can_reshape, ciInlineKlass*& inline_klass) {
2943   if (req() <= 2) {
2944     // Dead phi.
2945     return false;
2946   }
2947   inline_klass = nullptr;
2948 
2949   // TODO 8302217 We need to prevent endless pushing through
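   // (If this phi has users but all of them are other phis, pushing inline
   //  types down would only recreate the same situation at those phis, so we
   //  bail out.)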
2950   bool only_phi = (outcnt() != 0);
2951   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
2952     Node* n = fast_out(i);
2953     if (n->is_InlineType() && n->in(1) == this) {
2954       return false;
2955     }
2956     if (!n->is_Phi()) {
2957       only_phi = false;
2958     }
2959   }
2960   if (only_phi) {
2961     return false;
2962   }
2963 
2964   ResourceMark rm;
2965   Unique_Node_List worklist;
2966   worklist.push(this);
2967   Node_List casts;
2968 
2969   for (uint next = 0; next < worklist.size(); next++) {
2970     Node* phi = worklist.at(next);
2971     for (uint i = 1; i < phi->req(); i++) {
2972       Node* n = phi->in(i);
2973       if (n == nullptr) {
2974         return false;
2975       }
2976       while (n->is_ConstraintCast()) {
2977         if (n->in(0) != nullptr && n->in(0)->is_top()) {
2978           // Will die, don't optimize
2979           return false;
2980         }
2981         casts.push(n);
2982         n = n->in(1);
2983       }
2984       const Type* type = phase->type(n);
2985       if (n->is_InlineType() && (inline_klass == nullptr || inline_klass == type->inline_klass())) {
2986         inline_klass = type->inline_klass();
2987       } else if (n->is_Phi() && can_reshape && n->bottom_type()->isa_ptr()) {
2988         worklist.push(n);
2989       } else if (!type->is_zero_type()) {
2990         return false;
2991       }
2992     }
2993   }
2994   if (inline_klass == nullptr) {
2995     return false;
2996   }
2997 
2998   // Check if cast nodes can be pushed through
2999   const Type* t = Type::get_const_type(inline_klass);
3000   while (casts.size() != 0 && t != nullptr) {
3001     Node* cast = casts.pop();
3002     if (t->filter(cast->bottom_type()) == Type::TOP) {
3003       return false;
3004     }
3005   }
3006 
3007   return true;
3008 }
3009 
3010 #ifdef ASSERT
3011 bool PhiNode::can_push_inline_types_down(PhaseGVN* phase) {
3012   if (!can_be_inline_type()) {
3013     return false;
3014   }
3015 
3016   ciInlineKlass* inline_klass;
3017   return can_push_inline_types_down(phase, true, inline_klass);
3018 }
3019 #endif // ASSERT
3020 
3021 static int compare_types(const Type* const& e1, const Type* const& e2) {
3022   return ((intptr_t)e1 < (intptr_t)e2) ? -1 : (((intptr_t)e1 > (intptr_t)e2) ? 1 : 0); // avoid truncating a pointer difference to int
3023 }
3024 
3025 // Collect the types at casts that are going to be eliminated at this Phi and store them in a TypeTuple.
3026 // Sort the types in an arbitrary but deterministic (per VM run) order, so a given set of types always hashes to the
3027 // same TypeTuple and TypeTuple pointer comparison is enough to tell whether two lists of types are the same.
3028 const TypeTuple* PhiNode::collect_types(PhaseGVN* phase) const {
3029   const Node* region = in(0);
3030   const Type* phi_type = bottom_type();
3031   ResourceMark rm;
3032   GrowableArray<const Type*> types;
3033   for (uint i = 1; i < req(); i++) {
3034     if (region->in(i) == nullptr || phase->type(region->in(i)) == Type::TOP) {
3035       continue;
3036     }
3037     Node* in = Node::in(i);
3038     const Type* t = phase->type(in);
3039     if (in == nullptr || in == this || t == Type::TOP) {
3040       continue;

3390 #ifndef PRODUCT
3391 void CatchProjNode::dump_spec(outputStream *st) const {
3392   ProjNode::dump_spec(st);
3393   st->print("@bci %d ",_handler_bci);
3394 }
3395 #endif
3396 
3397 //=============================================================================
3398 //------------------------------Identity---------------------------------------
3399 // Check for CreateEx being Identity.
3400 Node* CreateExNode::Identity(PhaseGVN* phase) {
3401   if( phase->type(in(1)) == Type::TOP ) return in(1);
3402   if( phase->type(in(0)) == Type::TOP ) return in(0);
3403   if (phase->type(in(0)->in(0)) == Type::TOP) {
3404     assert(in(0)->is_CatchProj(), "control is CatchProj");
3405     return phase->C->top(); // dead code
3406   }
3407   // We only come from CatchProj, unless the CatchProj goes away.
3408   // If the CatchProj is optimized away, then we just carry the
3409   // exception oop through.
3410 
3411   // CheckCastPPNode::Ideal() for inline types reuses the exception
3412   // paths of a call to perform an allocation: we can see a Phi here.
3413   if (in(1)->is_Phi()) {
3414     return this;
3415   }
3416   CallNode *call = in(1)->in(0)->as_Call();
3417 
3418   return (in(0)->is_CatchProj() && in(0)->in(0)->is_Catch() &&
3419           in(0)->in(0)->in(1) == in(1)) ? this : call->in(TypeFunc::Parms);
3420 }
3421 
3422 //=============================================================================
3423 //------------------------------Value------------------------------------------
3424 // Check for being unreachable.
3425 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
3426   if (!in(0) || in(0)->is_top()) return Type::TOP;
3427   return bottom_type();
3428 }
3429 
3430 //------------------------------Ideal------------------------------------------
3431 // Check for no longer being part of a loop
3432 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3433   if (can_reshape && !in(0)->is_Region()) {
3434     // Dead code elimination can sometimes delete this projection so
3435     // if it's not there, there's nothing to do.