src/hotspot/share/opto/loopopts.cpp (old version)

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "gc/shared/barrierSet.hpp"
  26 #include "gc/shared/c2/barrierSetC2.hpp"
  27 #include "memory/allocation.inline.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "opto/addnode.hpp"
  30 #include "opto/callnode.hpp"
  31 #include "opto/castnode.hpp"
  32 #include "opto/connode.hpp"
  33 #include "opto/divnode.hpp"
  34 #include "opto/loopnode.hpp"
  35 #include "opto/matcher.hpp"
  36 #include "opto/movenode.hpp"
  37 #include "opto/mulnode.hpp"
  38 #include "opto/opaquenode.hpp"
  39 #include "opto/rootnode.hpp"
  40 #include "opto/subnode.hpp"
  41 #include "opto/subtypenode.hpp"
  42 #include "opto/superword.hpp"
  43 #include "opto/vectornode.hpp"
  44 #include "utilities/macros.hpp"
  45 
  46 //=============================================================================
  47 //------------------------------split_thru_phi---------------------------------
  48 // Split Node 'n' through merge point if there is enough win.
  49 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  50   if ((n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) ||
  51       (n->Opcode() == Op_ConvL2I && n->bottom_type() != TypeInt::INT)) {
  52     // ConvI2L/ConvL2I may carry type information that is unsafe to push up,
  53     // so disable this for now.
  54     return nullptr;
  55   }
  56 
  57   // Splitting range check CastIIs through a loop induction Phi can
  58   // create new Phis that are unrelated to the loop induction Phi,
  59   // preventing optimizations such as vectorization.
  60   if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
  61       n->in(1) == region->as_CountedLoop()->phi()) {
  62     return nullptr;
  63   }
  64 
  65   if (cannot_split_division(n, region)) {
  66     return nullptr;
  67   }
  68 
  69   int wins = 0;
  70   assert(!n->is_CFG(), "");
  71   assert(region->is_Region(), "");
  72 
  73   const Type* type = n->bottom_type();
  74   const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
  75   Node* phi;
  76   if (t_oop != nullptr && t_oop->is_known_instance_field()) {
  77     int iid    = t_oop->instance_id();
  78     int index  = C->get_alias_index(t_oop);
  79     int offset = t_oop->offset();
  80     phi = new PhiNode(region, type, nullptr, iid, index, offset);
  81   } else {
  82     phi = PhiNode::make_blank(region, n);
  83   }
  84   uint old_unique = C->unique();

 742       // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
 743       // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
 744       // have a Phi for the base here that we convert to a CMOVE all is well
 745       // and good.  But if the base is dead, we'll not make a CMOVE.  Later
 746       // the allocator will have to produce a base by creating a CMOVE of the
 747       // relevant bases.  This puts the allocator in the business of
 748       // manufacturing expensive instructions, generally a bad plan.
 749       // Just Say No to Conditionally-Moved Derived Pointers.
 750       if (tp && tp->offset() != 0)
 751         return nullptr;
 752       cost++;
 753       break;
 754     }
 755     default:
 756       return nullptr;              // In particular, can't do memory or I/O
 757     }
 758     // Add in the cost of any speculative ops
 759     for (uint j = 1; j < region->req(); j++) {
 760       Node *proj = region->in(j);
 761       Node *inp = phi->in(j);
 762       if (get_ctrl(inp) == proj) { // Found local op
 763         cost++;
 764         // Check for a chain of dependent ops; these will all become
 765         // speculative in a CMOV.
 766         for (uint k = 1; k < inp->req(); k++)
 767           if (get_ctrl(inp->in(k)) == proj)
 768             cost += ConditionalMoveLimit; // Too much speculative goo
 769       }
 770     }
 771     // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
 772     // This will likely Split-If, a higher-payoff operation.
 773     for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
 774       Node* use = phi->fast_out(k);
 775       if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
 776         cost += ConditionalMoveLimit;
 777       // Is there a use inside the loop?
 778       // Note: check only basic types since CMoveP is pinned.
 779       if (!used_inside_loop && is_java_primitive(bt)) {
 780         IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
 781         if (r_loop == u_loop || r_loop->is_member(u_loop)) {

1071             assert(get_loop(lca)->_nest < n_loop->_nest || get_loop(lca)->_head->as_Loop()->is_in_infinite_subgraph(), "must not be moved into inner loop");
1072 
1073             // Move store out of the loop
1074             _igvn.replace_node(hook, n->in(MemNode::Memory));
1075             _igvn.replace_input_of(n, 0, lca);
1076             set_ctrl_and_loop(n, lca);
1077 
1078             // Disconnect the phi now. An empty phi can confuse other
1079             // optimizations in this pass of loop opts.
1080             if (phi->in(LoopNode::LoopBackControl) == phi) {
1081               _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
1082               n_loop->_body.yank(phi);
1083             }
1084           }
1085         }
1086       }
1087     }
1088   }
1089 }
1090 
1091 //------------------------------split_if_with_blocks_pre-----------------------
1092 // Do the real work in a non-recursive function.  Data nodes want to be
1093 // cloned in the pre-order so they can feed each other nicely.
1094 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1095   // Cloning these guys is unlikely to win
1096   int n_op = n->Opcode();
1097   if (n_op == Op_MergeMem) {
1098     return n;
1099   }
1100   if (n->is_Proj()) {
1101     return n;
1102   }
1103   // Do not clone-up CmpFXXX variations, as these are always
1104   // followed by a CmpI
1105   if (n->is_Cmp()) {
1106     return n;
1107   }
1108   // Attempt to use a conditional move instead of a phi/branch
1109   if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1110     Node *cmov = conditional_move( n );
1111     if (cmov) {
1112       return cmov;
1113     }
1114   }
1115   if (n->is_CFG() || n->is_LoadStore()) {
1116     return n;
1117   }
1118   if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
1119     if (!C->major_progress()) {   // If chance of no more loop opts...
1120       _igvn._worklist.push(n);  // maybe we'll remove them
1121     }
1122     return n;

1358 
1359   return true;
1360 }
1361 
1362 // Detect whether the node is the loop end of an inner strip-mined loop.
1363 // Returns null if it is not, otherwise the exit of the outer strip-mined loop.
1364 static Node* is_inner_of_stripmined_loop(const Node* out) {
1365   Node* out_le = nullptr;
1366 
1367   if (out->is_CountedLoopEnd()) {
1368     const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1369 
1370     if (loop != nullptr && loop->is_strip_mined()) {
1371       out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1372     }
1373   }
1374 
1375   return out_le;
1376 }
1377 
1378 //------------------------------split_if_with_blocks_post----------------------
1379 // Do the real work in a non-recursive function.  CFG hackery wants to be
1380 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1381 // info.
1382 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1383 
1384   // Cloning Cmp through Phi's involves the split-if transform.
1385   // FastLock is not used by an If
1386   if (n->is_Cmp() && !n->is_FastLock()) {
1387     Node *n_ctrl = get_ctrl(n);
1388     // Determine if the Node has inputs from some local Phi.
1389     // Returns the block to clone thru.
1390     Node *n_blk = has_local_phi_input(n);
1391     if (n_blk != n_ctrl) {
1392       return;
1393     }
1394 
1395     if (!can_split_if(n_ctrl)) {
1396       return;
1397     }
1398 
1399     if (n->outcnt() != 1) {
1400       return; // Multiple bool's from 1 compare?
1401     }
1402     Node *bol = n->unique_out();
1403     assert(bol->is_Bool(), "expect a bool here");

1514           //    accesses would start to float, since we don't pin at that point.
1515           // 3. If we move from regular if: don't pin. All array accesses are already assumed to be pinned.
1516           bool pin_array_access_nodes = n->Opcode() == Op_RangeCheck &&
1517                                          prevdom->in(0)->Opcode() != Op_RangeCheck;
1518           dominated_by(prevdom->as_IfProj(), n->as_If(), false, pin_array_access_nodes);
1519           DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1520           return;
1521         }
1522         prevdom = dom;
1523         dom = idom(prevdom);
1524       }
1525     }
1526   }
1527 
1528   try_sink_out_of_loop(n);
1529   if (C->failing()) {
1530     return;
1531   }
1532 
1533   try_move_store_after_loop(n);
1534 }
1535 
1536 // Transform:
1537 //
1538 // if (some_condition) {
1539 //   // body 1
1540 // } else {
1541 //   // body 2
1542 // }
1543 // if (some_condition) {
1544 //   // body 3
1545 // } else {
1546 //   // body 4
1547 // }
1548 //
1549 // into:
1550 //
1551 //
1552 // if (some_condition) {
1553 //   // body 1

2011   uint i;
2012   for (i = 1; i < phi->req(); i++) {
2013     Node* b = phi->in(i);
2014     if (b->is_Phi()) {
2015       _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
2016     } else {
2017       assert(b->is_Bool() || b->is_OpaqueNotNull() || b->is_OpaqueInitializedAssertionPredicate(),
2018              "bool, non-null check with OpaqueNotNull or Initialized Assertion Predicate with its Opaque node");
2019     }
2020   }
2021   Node* n = phi->in(1);
2022   Node* sample_opaque = nullptr;
2023   Node *sample_bool = nullptr;
2024   if (n->is_OpaqueNotNull() || n->is_OpaqueInitializedAssertionPredicate()) {
2025     sample_opaque = n;
2026     sample_bool = n->in(1);
2027     assert(sample_bool->is_Bool(), "wrong type");
2028   } else {
2029     sample_bool = n;
2030   }
2031   Node *sample_cmp = sample_bool->in(1);
2032 
2033   // Make Phis to merge the Cmp's inputs.
2034   PhiNode *phi1 = new PhiNode(phi->in(0), Type::TOP);
2035   PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
2036   for (i = 1; i < phi->req(); i++) {
2037     Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
2038     Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
2039     phi1->set_req(i, n1);
2040     phi2->set_req(i, n2);
2041     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2042     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2043   }
2044   // See if these Phis have been made before.
2045   // Register with optimizer
2046   Node *hit1 = _igvn.hash_find_insert(phi1);
2047   if (hit1) {                   // Hit, toss just made Phi
2048     _igvn.remove_dead_node(phi1); // Remove new phi
2049     assert(hit1->is_Phi(), "" );
2050     phi1 = (PhiNode*)hit1;      // Use existing phi
2051   } else {                      // Miss
2052     _igvn.register_new_node_with_optimizer(phi1);
2053   }
2054   Node *hit2 = _igvn.hash_find_insert(phi2);

src/hotspot/share/opto/loopopts.cpp (new version)

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "gc/shared/barrierSet.hpp"
  26 #include "gc/shared/c2/barrierSetC2.hpp"
  27 #include "memory/allocation.inline.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "opto/addnode.hpp"
  30 #include "opto/callnode.hpp"
  31 #include "opto/castnode.hpp"
  32 #include "opto/connode.hpp"
  33 #include "opto/divnode.hpp"
  34 #include "opto/inlinetypenode.hpp"
  35 #include "opto/loopnode.hpp"
  36 #include "opto/matcher.hpp"
  37 #include "opto/movenode.hpp"
  38 #include "opto/mulnode.hpp"
  39 #include "opto/opaquenode.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "opto/subnode.hpp"
  42 #include "opto/subtypenode.hpp"
  43 #include "opto/superword.hpp"
  44 #include "opto/vectornode.hpp"
  45 #include "utilities/macros.hpp"
  46 
  47 //=============================================================================
  48 //------------------------------split_thru_phi---------------------------------
  49 // Split Node 'n' through merge point if there is enough win.
  50 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  51   if ((n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) ||
  52       (n->Opcode() == Op_ConvL2I && n->bottom_type() != TypeInt::INT)) {
  53     // ConvI2L/ConvL2I may carry type information that is unsafe to push up,
  54     // so disable this for now.
  55     return nullptr;
  56   }
  57 
  58   // Splitting range check CastIIs through a loop induction Phi can
  59   // create new Phis that are unrelated to the loop induction Phi,
  60   // preventing optimizations such as vectorization.
  61   if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
  62       n->in(1) == region->as_CountedLoop()->phi()) {
  63     return nullptr;
  64   }
  65 
  66   // Inline types should not be split through Phis because they cannot be merged
  67   // through Phi nodes; each value input needs to be merged individually.
  68   if (n->is_InlineType()) {
  69     return nullptr;
  70   }
  71 
  72   if (cannot_split_division(n, region)) {
  73     return nullptr;
  74   }
  75 
  76   int wins = 0;
  77   assert(!n->is_CFG(), "");
  78   assert(region->is_Region(), "");
  79 
  80   const Type* type = n->bottom_type();
  81   const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
  82   Node* phi;
  83   if (t_oop != nullptr && t_oop->is_known_instance_field()) {
  84     int iid    = t_oop->instance_id();
  85     int index  = C->get_alias_index(t_oop);
  86     int offset = t_oop->offset();
  87     phi = new PhiNode(region, type, nullptr, iid, index, offset);
  88   } else {
  89     phi = PhiNode::make_blank(region, n);
  90   }
  91   uint old_unique = C->unique();
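
For intuition, split_thru_phi clones 'n' onto each path entering the Region and merges the clones with a new Phi, instead of computing 'n' once below the merge. A minimal source-level sketch of the same reshaping, as hypothetical standalone C++ (illustrative names, not C2 IR):

#include <cstdio>

// Before: one op consumes the merged value (an op hanging off the Phi).
static int op_after_merge(bool c, int x, int y) {
  int phi = c ? x : y;  // merge point (the "Phi")
  return phi + 1;       // the node 'n' below the merge
}

// After: 'n' is cloned onto each incoming path and a new Phi merges the
// results. If one path feeds a constant, the clone can fold there --
// that is the "win" the policy parameter counts.
static int op_split_thru_merge(bool c, int x, int y) {
  return c ? (x + 1) : (y + 1);
}

int main() {
  printf("%d %d\n", op_after_merge(true, 41, 0), op_split_thru_merge(true, 41, 0));
  return 0;
}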

 749       // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
 750       // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
 751       // have a Phi for the base here that we convert to a CMOVE all is well
 752       // and good.  But if the base is dead, we'll not make a CMOVE.  Later
 753       // the allocator will have to produce a base by creating a CMOVE of the
 754       // relevant bases.  This puts the allocator in the business of
 755       // manufacturing expensive instructions, generally a bad plan.
 756       // Just Say No to Conditionally-Moved Derived Pointers.
 757       if (tp && tp->offset() != 0)
 758         return nullptr;
 759       cost++;
 760       break;
 761     }
 762     default:
 763       return nullptr;              // In particular, can't do memory or I/O
 764     }
 765     // Add in the cost of any speculative ops
 766     for (uint j = 1; j < region->req(); j++) {
 767       Node *proj = region->in(j);
 768       Node *inp = phi->in(j);
 769       if (inp->isa_InlineType()) {
 770         // TODO 8302217 This prevents PhiNode::push_inline_types_through
 771         return nullptr;
 772       }
 773       if (get_ctrl(inp) == proj) { // Found local op
 774         cost++;
 775         // Check for a chain of dependent ops; these will all become
 776         // speculative in a CMOV.
 777         for (uint k = 1; k < inp->req(); k++)
 778           if (get_ctrl(inp->in(k)) == proj)
 779             cost += ConditionalMoveLimit; // Too much speculative goo
 780       }
 781     }
 782     // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
 783     // This will likely Split-If, a higher-payoff operation.
 784     for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
 785       Node* use = phi->fast_out(k);
 786       if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
 787         cost += ConditionalMoveLimit;
 788       // Is there a use inside the loop?
 789       // Note: check only basic types since CMoveP is pinned.
 790       if (!used_inside_loop && is_java_primitive(bt)) {
 791         IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
 792         if (r_loop == u_loop || r_loop->is_member(u_loop)) {
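
The cost bookkeeping above reflects that a conditional move computes both arms unconditionally, so any chain of ops local to one path becomes speculative work on every execution. A hedged sketch of the trade-off in standalone C++ (hypothetical example, not what C2 emits):

#include <cstdio>

static int chain(int a) { return a * a * a + 7; }  // stands in for dependent ops on one path

// Branch form: 'chain' runs only when c is true.
static int with_branch(bool c, int a, int b) {
  if (c) return chain(a);
  return b;
}

// CMOV form: both arms are evaluated, then one result is selected.
// A chain of dependent ops on one path is why the heuristic above adds
// ConditionalMoveLimit to the cost and gives up.
static int with_cmov(bool c, int a, int b) {
  int t = chain(a);  // speculative: executes even when c is false
  return c ? t : b;
}

int main() {
  printf("%d %d\n", with_branch(false, 9, 1), with_cmov(false, 9, 1));
  return 0;
}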

1082             assert(get_loop(lca)->_nest < n_loop->_nest || get_loop(lca)->_head->as_Loop()->is_in_infinite_subgraph(), "must not be moved into inner loop");
1083 
1084             // Move store out of the loop
1085             _igvn.replace_node(hook, n->in(MemNode::Memory));
1086             _igvn.replace_input_of(n, 0, lca);
1087             set_ctrl_and_loop(n, lca);
1088 
1089             // Disconnect the phi now. An empty phi can confuse other
1090             // optimizations in this pass of loop opts.
1091             if (phi->in(LoopNode::LoopBackControl) == phi) {
1092               _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
1093               n_loop->_body.yank(phi);
1094             }
1095           }
1096         }
1097       }
1098     }
1099   }
1100 }
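
For intuition, try_move_store_after_loop turns a store that executes on every iteration into a single store of the final value past the loop, once it is safe (no other use in the loop observes the intermediate memory states). A source-level sketch (hypothetical example):

#include <cstdio>

struct Obj { int f; };

// Before: the store to o->f executes n times, but only the last value
// is observable after the loop.
static void store_in_loop(Obj* o, int n) {
  for (int i = 0; i < n; i++) {
    o->f = i;
  }
}

// After the transform: one store of the final value at the loop exit
// (the 'lca' control computed in the code above).
static void store_after_loop(Obj* o, int n) {
  if (n > 0) {
    o->f = n - 1;
  }
}

int main() {
  Obj a{0}, b{0};
  store_in_loop(&a, 5);
  store_after_loop(&b, 5);
  printf("%d %d\n", a.f, b.f);  // 4 4
  return 0;
}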
1101 
1102 // We can't use immutable memory for the flat array check because we are loading the mark word, which is
1103 // mutable. Although the bits we are interested in are immutable (we check for markWord::unlocked_value),
1104 // we need to use raw memory so as not to break anti-dependency analysis. The code below still attempts to
1105 // move flat array checks out of loops, mainly to enable loop unswitching.
1106 void PhaseIdealLoop::move_flat_array_check_out_of_loop(Node* n) {
1107   // Skip checks for more than one array
1108   if (n->req() > 3) {
1109     return;
1110   }
1111   Node* mem = n->in(FlatArrayCheckNode::Memory);
1112   Node* array = n->in(FlatArrayCheckNode::ArrayOrKlass)->uncast();
1113   IdealLoopTree* check_loop = get_loop(get_ctrl(n));
1114   IdealLoopTree* ary_loop = get_loop(get_ctrl(array));
1115 
1116   // Check if array is loop invariant
1117   if (!check_loop->is_member(ary_loop)) {
1118     // Walk up memory graph from the check until we leave the loop
1119     VectorSet wq;
1120     wq.set(mem->_idx);
1121     while (check_loop->is_member(get_loop(ctrl_or_self(mem)))) {
1122       if (mem->is_Phi()) {
1123         mem = mem->in(1);
1124       } else if (mem->is_MergeMem()) {
1125         mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1126       } else if (mem->is_Proj()) {
1127         mem = mem->in(0);
1128       } else if (mem->is_MemBar() || mem->is_SafePoint()) {
1129         mem = mem->in(TypeFunc::Memory);
1130       } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
1131         mem = mem->in(MemNode::Memory);
1132       } else {
1133 #ifdef ASSERT
1134         mem->dump();
1135 #endif
1136         ShouldNotReachHere();
1137       }
1138       if (wq.test_set(mem->_idx)) {
1139         return;
1140       }
1141     }
1142     // Replace memory input and re-compute ctrl to move the check out of the loop
1143     _igvn.replace_input_of(n, 1, mem);
1144     set_ctrl_and_loop(n, get_early_ctrl(n));
1145     Node* bol = n->unique_out();
1146     set_ctrl_and_loop(bol, get_early_ctrl(bol));
1147   }
1148 }
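
The loop above is a guarded graph walk: from each memory node, step to the one input that matters for the raw slice, and bail out if a node is ever revisited (the VectorSet doubles as cycle detection). A standalone sketch of the idiom with hypothetical node types:

#include <cstdio>
#include <unordered_set>

struct MemNode { int idx; MemNode* input; };

// Walk up a memory chain until 'stop' holds, giving up on a cycle --
// the same shape as the wq.test_set() loop above.
static MemNode* walk_up(MemNode* mem, bool (*stop)(MemNode*)) {
  std::unordered_set<int> visited;
  visited.insert(mem->idx);
  while (!stop(mem)) {
    mem = mem->input;  // the real code picks the input per node kind
    if (!visited.insert(mem->idx).second) {
      return nullptr;  // revisited a node: give up
    }
  }
  return mem;
}

int main() {
  MemNode a{1, nullptr}, b{2, &a}, c{3, &b};
  a.input = &a;
  MemNode* r = walk_up(&c, [](MemNode* m) { return m->idx == 1; });
  printf("%d\n", r ? r->idx : -1);  // 1
  return 0;
}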
1149 
1150 //------------------------------split_if_with_blocks_pre-----------------------
1151 // Do the real work in a non-recursive function.  Data nodes want to be
1152 // cloned in the pre-order so they can feed each other nicely.
1153 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1154   // Cloning these guys is unlikely to win
1155   int n_op = n->Opcode();
1156   if (n_op == Op_MergeMem) {
1157     return n;
1158   }
1159   if (n->is_Proj()) {
1160     return n;
1161   }
1162 
1163   if (n->isa_FlatArrayCheck()) {
1164     move_flat_array_check_out_of_loop(n);
1165     return n;
1166   }
1167 
1168   // Do not clone-up CmpFXXX variations, as these are always
1169   // followed by a CmpI
1170   if (n->is_Cmp()) {
1171     return n;
1172   }
1173   // Attempt to use a conditional move instead of a phi/branch
1174   if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1175     Node *cmov = conditional_move( n );
1176     if (cmov) {
1177       return cmov;
1178     }
1179   }
1180   if (n->is_CFG() || n->is_LoadStore()) {
1181     return n;
1182   }
1183   if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
1184     if (!C->major_progress()) {   // If chance of no more loop opts...
1185       _igvn._worklist.push(n);  // maybe we'll remove them
1186     }
1187     return n;

1423 
1424   return true;
1425 }
1426 
1427 // Detect whether the node is the loop end of an inner strip-mined loop.
1428 // Returns null if it is not, otherwise the exit of the outer strip-mined loop.
1429 static Node* is_inner_of_stripmined_loop(const Node* out) {
1430   Node* out_le = nullptr;
1431 
1432   if (out->is_CountedLoopEnd()) {
1433     const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1434 
1435     if (loop != nullptr && loop->is_strip_mined()) {
1436       out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1437     }
1438   }
1439 
1440   return out_le;
1441 }
1442 
1443 bool PhaseIdealLoop::flat_array_element_type_check(Node *n) {
1444   // If the CmpP is a subtype check for a value that has just been
1445   // loaded from an array, the subtype check guarantees that the value
1446   // can't be stored in a flat array, and the load of the value happens
1447   // under a flat array check, then push the type check through the phi
1448   // of the flat array check. This needs special logic because the
1449   // subtype check's input is not a phi but a LoadKlass that must first
1450   // be cloned through the phi.
1451   if (n->Opcode() != Op_CmpP) {
1452     return false;
1453   }
1454 
1455   Node* klassptr = n->in(1);
1456   Node* klasscon = n->in(2);
1457 
1458   if (klassptr->is_DecodeNarrowPtr()) {
1459     klassptr = klassptr->in(1);
1460   }
1461 
1462   if (klassptr->Opcode() != Op_LoadKlass && klassptr->Opcode() != Op_LoadNKlass) {
1463     return false;
1464   }
1465 
1466   if (!klasscon->is_Con()) {
1467     return false;
1468   }
1469 
1470   Node* addr = klassptr->in(MemNode::Address);
1471 
1472   if (!addr->is_AddP()) {
1473     return false;
1474   }
1475 
1476   intptr_t offset;
1477   Node* obj = AddPNode::Ideal_base_and_offset(addr, &_igvn, offset);
1478 
1479   if (obj == nullptr) {
1480     return false;
1481   }
1482 
1483   assert(obj != nullptr && addr->in(AddPNode::Base) == addr->in(AddPNode::Address), "malformed AddP?");
1484   if (obj->Opcode() == Op_CastPP) {
1485     obj = obj->in(1);
1486   }
1487 
1488   if (!obj->is_Phi()) {
1489     return false;
1490   }
1491 
1492   Node* region = obj->in(0);
1493 
1494   Node* phi = PhiNode::make_blank(region, n->in(1));
1495   for (uint i = 1; i < region->req(); i++) {
1496     Node* in = obj->in(i);
1497     Node* ctrl = region->in(i);
1498     if (addr->in(AddPNode::Base) != obj) {
1499       Node* cast = addr->in(AddPNode::Base);
1500       assert(cast->Opcode() == Op_CastPP && cast->in(0) != nullptr, "inconsistent subgraph");
1501       Node* cast_clone = cast->clone();
1502       cast_clone->set_req(0, ctrl);
1503       cast_clone->set_req(1, in);
1504       register_new_node(cast_clone, ctrl);
1505       const Type* tcast = cast_clone->Value(&_igvn);
1506       _igvn.set_type(cast_clone, tcast);
1507       cast_clone->as_Type()->set_type(tcast);
1508       in = cast_clone;
1509     }
1510     Node* addr_clone = addr->clone();
1511     addr_clone->set_req(AddPNode::Base, in);
1512     addr_clone->set_req(AddPNode::Address, in);
1513     register_new_node(addr_clone, ctrl);
1514     _igvn.set_type(addr_clone, addr_clone->Value(&_igvn));
1515     Node* klassptr_clone = klassptr->clone();
1516     klassptr_clone->set_req(2, addr_clone);
1517     register_new_node(klassptr_clone, ctrl);
1518     _igvn.set_type(klassptr_clone, klassptr_clone->Value(&_igvn));
1519     if (klassptr != n->in(1)) {
1520       Node* decode = n->in(1);
1521       assert(decode->is_DecodeNarrowPtr(), "inconsistent subgraph");
1522       Node* decode_clone = decode->clone();
1523       decode_clone->set_req(1, klassptr_clone);
1524       register_new_node(decode_clone, ctrl);
1525       _igvn.set_type(decode_clone, decode_clone->Value(&_igvn));
1526       klassptr_clone = decode_clone;
1527     }
1528     phi->set_req(i, klassptr_clone);
1529   }
1530   register_new_node(phi, region);
1531   Node* orig = n->in(1);
1532   _igvn.replace_input_of(n, 1, phi);
1533   split_if_with_blocks_post(n);
1534   if (n->outcnt() != 0) {
1535     _igvn.replace_input_of(n, 1, orig);
1536     _igvn.remove_dead_node(phi);
1537   }
1538   return true;
1539 }
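
Note the tail of the function: it rewires the Cmp to the new Phi, immediately re-runs split_if_with_blocks_post, and restores the original input if the Cmp survived with uses. That is a speculative apply-then-revert pattern; a minimal standalone sketch under hypothetical names (stand-ins for replace_input_of / remove_dead_node):

#include <cstdio>

// Apply a rewrite, run a follow-up optimization, and undo the rewrite
// when the optimization made no progress.
template <typename Opt>
static bool apply_or_revert(int& slot, int candidate, Opt optimizes) {
  int orig = slot;
  slot = candidate;   // speculative rewrite
  if (optimizes(slot)) {
    return true;      // progress: keep it
  }
  slot = orig;        // no progress: revert
  return false;
}

int main() {
  int input = 10;
  bool changed = apply_or_revert(input, 42, [](int v) { return v % 2 == 0; });
  printf("%d %d\n", (int)changed, input);  // 1 42
  return 0;
}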
1540 
1541 //------------------------------split_if_with_blocks_post----------------------
1542 // Do the real work in a non-recursive function.  CFG hackery wants to be
1543 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1544 // info.
1545 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1546 
1547   if (flat_array_element_type_check(n)) {
1548     return;
1549   }
1550 
1551   // Cloning Cmp through Phi's involves the split-if transform.
1552   // FastLock is not used by an If
1553   if (n->is_Cmp() && !n->is_FastLock()) {
1554     Node *n_ctrl = get_ctrl(n);
1555     // Determine if the Node has inputs from some local Phi.
1556     // Returns the block to clone thru.
1557     Node *n_blk = has_local_phi_input(n);
1558     if (n_blk != n_ctrl) {
1559       return;
1560     }
1561 
1562     if (!can_split_if(n_ctrl)) {
1563       return;
1564     }
1565 
1566     if (n->outcnt() != 1) {
1567       return; // Multiple bool's from 1 compare?
1568     }
1569     Node *bol = n->unique_out();
1570     assert(bol->is_Bool(), "expect a bool here");

1681           //    accesses would start to float, since we don't pin at that point.
1682           // 3. If we move from regular if: don't pin. All array accesses are already assumed to be pinned.
1683           bool pin_array_access_nodes = n->Opcode() == Op_RangeCheck &&
1684                                          prevdom->in(0)->Opcode() != Op_RangeCheck;
1685           dominated_by(prevdom->as_IfProj(), n->as_If(), false, pin_array_access_nodes);
1686           DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1687           return;
1688         }
1689         prevdom = dom;
1690         dom = idom(prevdom);
1691       }
1692     }
1693   }
1694 
1695   try_sink_out_of_loop(n);
1696   if (C->failing()) {
1697     return;
1698   }
1699 
1700   try_move_store_after_loop(n);
1701 
1702   // Remove multiple allocations of the same inline type
1703   if (n->is_InlineType()) {
1704     n->as_InlineType()->remove_redundant_allocations(this);
1705   }
1706 }
1707 
1708 // Transform:
1709 //
1710 // if (some_condition) {
1711 //   // body 1
1712 // } else {
1713 //   // body 2
1714 // }
1715 // if (some_condition) {
1716 //   // body 3
1717 // } else {
1718 //   // body 4
1719 // }
1720 //
1721 // into:
1722 //
1723 //
1724 // if (some_condition) {
1725 //   // body 1

2183   uint i;
2184   for (i = 1; i < phi->req(); i++) {
2185     Node* b = phi->in(i);
2186     if (b->is_Phi()) {
2187       _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
2188     } else {
2189       assert(b->is_Bool() || b->is_OpaqueNotNull() || b->is_OpaqueInitializedAssertionPredicate(),
2190              "bool, non-null check with OpaqueNotNull or Initialized Assertion Predicate with its Opaque node");
2191     }
2192   }
2193   Node* n = phi->in(1);
2194   Node* sample_opaque = nullptr;
2195   Node *sample_bool = nullptr;
2196   if (n->is_OpaqueNotNull() || n->is_OpaqueInitializedAssertionPredicate()) {
2197     sample_opaque = n;
2198     sample_bool = n->in(1);
2199     assert(sample_bool->is_Bool(), "wrong type");
2200   } else {
2201     sample_bool = n;
2202   }
2203   Node* sample_cmp = sample_bool->in(1);
2204   const Type* t = Type::TOP;
2205   const TypePtr* at = nullptr;
2206   if (sample_cmp->is_FlatArrayCheck()) {
2207     // The left input of a FlatArrayCheckNode is memory; set the (adr) type of the phi accordingly.
2208     assert(sample_cmp->in(1)->bottom_type() == Type::MEMORY, "unexpected input type");
2209     t = Type::MEMORY;
2210     at = TypeRawPtr::BOTTOM;
2211   }
2212 
2213   // Make Phis to merge the Cmp's inputs.
2214   PhiNode *phi1 = new PhiNode(phi->in(0), t, at);
2215   PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
2216   for (i = 1; i < phi->req(); i++) {
2217     Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
2218     Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
2219     phi1->set_req(i, n1);
2220     phi2->set_req(i, n2);
2221     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2222     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2223   }
2224   // See if these Phis have been made before.
2225   // Register with optimizer
2226   Node *hit1 = _igvn.hash_find_insert(phi1);
2227   if (hit1) {                   // Hit, toss just made Phi
2228     _igvn.remove_dead_node(phi1); // Remove new phi
2229     assert(hit1->is_Phi(), "" );
2230     phi1 = (PhiNode*)hit1;      // Use existing phi
2231   } else {                      // Miss
2232     _igvn.register_new_node_with_optimizer(phi1);
2233   }
2234   Node *hit2 = _igvn.hash_find_insert(phi2);
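
The hit/miss dance around hash_find_insert is C2's value-numbering (hash-consing) idiom: build a candidate node, probe the table, and throw the candidate away when an equivalent node is already registered. A self-contained sketch with a plain hash set standing in for the IGVN table (hypothetical types, not the C2 API):

#include <cstddef>
#include <cstdio>
#include <unordered_set>

struct Pair {
  int a, b;
  bool operator==(const Pair& o) const { return a == o.a && b == o.b; }
};
struct PairHash {
  std::size_t operator()(const Pair& p) const {
    return static_cast<std::size_t>(p.a) * 31u + static_cast<std::size_t>(p.b);
  }
};

// Return the canonical copy of 'candidate': reuse an existing equal
// element on a hit, register the candidate on a miss.
static const Pair* hash_find_insert(std::unordered_set<Pair, PairHash>& table,
                                    const Pair& candidate) {
  auto result = table.insert(candidate);
  // result.second == false is the "hit" case: the freshly built node is
  // dropped and the previously registered one is used instead.
  return &*result.first;
}

int main() {
  std::unordered_set<Pair, PairHash> table;
  const Pair* p1 = hash_find_insert(table, {1, 2});  // miss: registered
  const Pair* p2 = hash_find_insert(table, {1, 2});  // hit: reused
  printf("%d\n", p1 == p2);  // 1
  return 0;
}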