
src/hotspot/share/opto/loopopts.cpp


  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/connode.hpp"
  34 #include "opto/castnode.hpp"
  35 #include "opto/divnode.hpp"
  36 #include "opto/loopnode.hpp"
  37 #include "opto/matcher.hpp"
  38 #include "opto/mulnode.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/opaquenode.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "opto/subnode.hpp"
  43 #include "opto/subtypenode.hpp"
  44 #include "opto/vectornode.hpp"
  45 #include "utilities/macros.hpp"
  46 
  47 //=============================================================================
  48 //------------------------------split_thru_phi---------------------------------
  49 // Split Node 'n' through merge point if there is enough win.
  50 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  51   if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
  52     // ConvI2L may have type information on it which is unsafe to push up
  53     // so disable this for now
  54     return nullptr;
  55   }
  56 
  57   // Splitting range check CastIIs through a loop induction Phi can
  58   // cause new Phis to be created that are left unrelated to the loop
  59   // induction Phi and prevent optimizations (vectorization)
  60   if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
  61       n->in(1) == region->as_CountedLoop()->phi()) {
  62     return nullptr;
  63   }
  64 
  65   if (cannot_split_division(n, region)) {
  66     return nullptr;
  67   }
  68 
  69   int wins = 0;
  70   assert(!n->is_CFG(), "");
  71   assert(region->is_Region(), "");
  72 
  73   const Type* type = n->bottom_type();
  74   const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
  75   Node* phi;
  76   if (t_oop != nullptr && t_oop->is_known_instance_field()) {
  77     int iid    = t_oop->instance_id();
  78     int index  = C->get_alias_index(t_oop);
  79     int offset = t_oop->offset();
  80     phi = new PhiNode(region, type, nullptr, iid, index, offset);
  81   } else {
  82     phi = PhiNode::make_blank(region, n);
  83   }
  84   uint old_unique = C->unique();

 681       // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
 682       // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
 683       // have a Phi for the base here that we convert to a CMOVE all is well
 684       // and good.  But if the base is dead, we'll not make a CMOVE.  Later
 685       // the allocator will have to produce a base by creating a CMOVE of the
 686       // relevant bases.  This puts the allocator in the business of
 687       // manufacturing expensive instructions, generally a bad plan.
 688       // Just Say No to Conditionally-Moved Derived Pointers.
 689       if (tp && tp->offset() != 0)
 690         return nullptr;
 691       cost++;
 692       break;
 693     }
 694     default:
 695       return nullptr;              // In particular, can't do memory or I/O
 696     }
 697     // Add in the cost of any speculative ops
 698     for (uint j = 1; j < region->req(); j++) {
 699       Node *proj = region->in(j);
 700       Node *inp = phi->in(j);
 701       if (get_ctrl(inp) == proj) { // Found local op
 702         cost++;
 703         // Check for a chain of dependent ops; these will all become
 704         // speculative in a CMOV.
 705         for (uint k = 1; k < inp->req(); k++)
 706           if (get_ctrl(inp->in(k)) == proj)
 707             cost += ConditionalMoveLimit; // Too much speculative goo
 708       }
 709     }
 710     // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
 711     // This will likely Split-If, a higher-payoff operation.
 712     for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
 713       Node* use = phi->fast_out(k);
 714       if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
 715         cost += ConditionalMoveLimit;
 716       // Is there a use inside the loop?
 717       // Note: check only basic types since CMoveP is pinned.
 718       if (!used_inside_loop && is_java_primitive(bt)) {
 719         IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
 720         if (r_loop == u_loop || r_loop->is_member(u_loop)) {

1006             assert(get_loop(lca)->_nest < n_loop->_nest || lca->in(0)->is_NeverBranch(), "must not be moved into inner loop");
1007 
1008             // Move store out of the loop
1009             _igvn.replace_node(hook, n->in(MemNode::Memory));
1010             _igvn.replace_input_of(n, 0, lca);
1011             set_ctrl_and_loop(n, lca);
1012 
1013             // Disconnect the phi now. An empty phi can confuse other
1014             // optimizations in this pass of loop opts.
1015             if (phi->in(LoopNode::LoopBackControl) == phi) {
1016               _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
1017               n_loop->_body.yank(phi);
1018             }
1019           }
1020         }
1021       }
1022     }
1023   }
1024 }
1025 
1026 //------------------------------split_if_with_blocks_pre-----------------------
1027 // Do the real work in a non-recursive function.  Data nodes want to be
1028 // cloned in the pre-order so they can feed each other nicely.
1029 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1030   // Cloning these guys is unlikely to win
1031   int n_op = n->Opcode();
1032   if (n_op == Op_MergeMem) {
1033     return n;
1034   }
1035   if (n->is_Proj()) {
1036     return n;
1037   }
1038   // Do not clone-up CmpFXXX variations, as these are always
1039   // followed by a CmpI
1040   if (n->is_Cmp()) {
1041     return n;
1042   }
1043   // Attempt to use a conditional move instead of a phi/branch
1044   if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1045     Node *cmov = conditional_move( n );
1046     if (cmov) {
1047       return cmov;
1048     }
1049   }
1050   if (n->is_CFG() || n->is_LoadStore()) {
1051     return n;
1052   }
1053   if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
1054     if (!C->major_progress()) {   // If chance of no more loop opts...
1055       _igvn._worklist.push(n);  // maybe we'll remove them
1056     }
1057     return n;

1295 
1296   return true;
1297 }
1298 
1299 // Detect whether the node is the loop end of the inner loop of a strip-mined loop nest.
1300 // Returns null if it is not, otherwise the exit of the outer strip-mined loop.
1301 static Node* is_inner_of_stripmined_loop(const Node* out) {
1302   Node* out_le = nullptr;
1303 
1304   if (out->is_CountedLoopEnd()) {
1305     const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1306 
1307     if (loop != nullptr && loop->is_strip_mined()) {
1308       out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1309     }
1310   }
1311 
1312   return out_le;
1313 }
1314 
1315 //------------------------------split_if_with_blocks_post----------------------
1316 // Do the real work in a non-recursive function.  CFG hackery wants to be
1317 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1318 // info.
1319 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1320 
1321   // Cloning Cmp through Phi's involves the split-if transform.
1322   // FastLock is not used by an If
1323   if (n->is_Cmp() && !n->is_FastLock()) {
1324     Node *n_ctrl = get_ctrl(n);
1325     // Determine if the Node has inputs from some local Phi.
1326     // Returns the block to clone thru.
1327     Node *n_blk = has_local_phi_input(n);
1328     if (n_blk != n_ctrl) {
1329       return;
1330     }
1331 
1332     if (!can_split_if(n_ctrl)) {
1333       return;
1334     }
1335 
1336     if (n->outcnt() != 1) {
1337       return; // Multiple bool's from 1 compare?
1338     }
1339     Node *bol = n->unique_out();
1340     assert(bol->is_Bool(), "expect a bool here");

1433           Node* out_le = is_inner_of_stripmined_loop(dom);
1434           if (out_le != nullptr) {
1435             prevdom = out_le;
1436           }
1437           // Replace the dominated test with an obvious true or false.
1438           // Place it on the IGVN worklist for later cleanup.
1439           C->set_major_progress();
1440           dominated_by(prevdom->as_IfProj(), n->as_If(), false, true);
1441           DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1442           return;
1443         }
1444         prevdom = dom;
1445         dom = idom(prevdom);
1446       }
1447     }
1448   }
1449 
1450   try_sink_out_of_loop(n);
1451 
1452   try_move_store_after_loop(n);
1453 }
1454 
1455 // Transform:
1456 //
1457 // if (some_condition) {
1458 //   // body 1
1459 // } else {
1460 //   // body 2
1461 // }
1462 // if (some_condition) {
1463 //   // body 3
1464 // } else {
1465 //   // body 4
1466 // }
1467 //
1468 // into:
1469 //
1470 //
1471 // if (some_condition) {
1472 //   // body 1

1866   uint i;
1867   for (i = 1; i < phi->req(); i++) {
1868     Node *b = phi->in(i);
1869     if (b->is_Phi()) {
1870       _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
1871     } else {
1872       assert(b->is_Bool() || b->Opcode() == Op_Opaque4, "");
1873     }
1874   }
1875 
1876   Node* n = phi->in(1);
1877   Node* sample_opaque = nullptr;
1878   Node *sample_bool = nullptr;
1879   if (n->Opcode() == Op_Opaque4) {
1880     sample_opaque = n;
1881     sample_bool = n->in(1);
1882     assert(sample_bool->is_Bool(), "wrong type");
1883   } else {
1884     sample_bool = n;
1885   }
1886   Node *sample_cmp = sample_bool->in(1);
1887 
1888   // Make Phis to merge the Cmp's inputs.
1889   PhiNode *phi1 = new PhiNode(phi->in(0), Type::TOP);
1890   PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
1891   for (i = 1; i < phi->req(); i++) {
1892     Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
1893     Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
1894     phi1->set_req(i, n1);
1895     phi2->set_req(i, n2);
1896     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
1897     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
1898   }
1899   // See if these Phis have been made before.
1900   // Register with optimizer
1901   Node *hit1 = _igvn.hash_find_insert(phi1);
1902   if (hit1) {                   // Hit, toss just made Phi
1903     _igvn.remove_dead_node(phi1); // Remove new phi
1904     assert(hit1->is_Phi(), "" );
1905     phi1 = (PhiNode*)hit1;      // Use existing phi
1906   } else {                      // Miss
1907     _igvn.register_new_node_with_optimizer(phi1);
1908   }
1909   Node *hit2 = _igvn.hash_find_insert(phi2);

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/connode.hpp"
  34 #include "opto/castnode.hpp"
  35 #include "opto/divnode.hpp"
  36 #include "opto/inlinetypenode.hpp"
  37 #include "opto/loopnode.hpp"
  38 #include "opto/matcher.hpp"
  39 #include "opto/mulnode.hpp"
  40 #include "opto/movenode.hpp"
  41 #include "opto/opaquenode.hpp"
  42 #include "opto/rootnode.hpp"
  43 #include "opto/subnode.hpp"
  44 #include "opto/subtypenode.hpp"
  45 #include "opto/vectornode.hpp"
  46 #include "utilities/macros.hpp"
  47 
  48 //=============================================================================
  49 //------------------------------split_thru_phi---------------------------------
  50 // Split Node 'n' through merge point if there is enough win.
  51 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  52   if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
  53     // ConvI2L may have type information on it which is unsafe to push up
  54     // so disable this for now
  55     return nullptr;
  56   }
  57 
  58   // Splitting range check CastIIs through a loop induction Phi can
  59   // cause new Phis to be created that are left unrelated to the loop
  60   // induction Phi and prevent optimizations (vectorization)
  61   if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
  62       n->in(1) == region->as_CountedLoop()->phi()) {
  63     return nullptr;
  64   }
  65 
  66   // Inline types should not be split through Phis because they cannot be merged
  67   // through Phi nodes; each value input needs to be merged individually.
  68   if (n->is_InlineType()) {
  69     return nullptr;
  70   }
  71 
  72   if (cannot_split_division(n, region)) {
  73     return nullptr;
  74   }
  75 
  76   int wins = 0;
  77   assert(!n->is_CFG(), "");
  78   assert(region->is_Region(), "");
  79 
  80   const Type* type = n->bottom_type();
  81   const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
  82   Node* phi;
  83   if (t_oop != nullptr && t_oop->is_known_instance_field()) {
  84     int iid    = t_oop->instance_id();
  85     int index  = C->get_alias_index(t_oop);
  86     int offset = t_oop->offset();
  87     phi = new PhiNode(region, type, nullptr, iid, index, offset);
  88   } else {
  89     phi = PhiNode::make_blank(region, n);
  90   }
  91   uint old_unique = C->unique();
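
A source-level sketch of the transformation (illustrative C++, not C2 IR; function names are made up): split_thru_phi clones an operation above the merge point so that the Phi merges results instead of inputs.

  // Before: one conversion applied below the merge point.
  long merged_then_converted(bool c, int a, int b) {
    int merged = c ? a : b;          // Phi(a, b)
    return (long) merged;            // ConvI2L below the Phi
  }

  // After: the conversion is cloned per input; the Phi merges longs.
  long converted_then_merged(bool c, int a, int b) {
    return c ? (long) a : (long) b;  // Phi(ConvI2L(a), ConvI2L(b))
  }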

 688       // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
 689       // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
 690       // have a Phi for the base here that we convert to a CMOVE all is well
 691       // and good.  But if the base is dead, we'll not make a CMOVE.  Later
 692       // the allocator will have to produce a base by creating a CMOVE of the
 693       // relevant bases.  This puts the allocator in the business of
 694       // manufacturing expensive instructions, generally a bad plan.
 695       // Just Say No to Conditionally-Moved Derived Pointers.
 696       if (tp && tp->offset() != 0)
 697         return nullptr;
 698       cost++;
 699       break;
 700     }
 701     default:
 702       return nullptr;              // In particular, can't do memory or I/O
 703     }
 704     // Add in the cost of any speculative ops
 705     for (uint j = 1; j < region->req(); j++) {
 706       Node *proj = region->in(j);
 707       Node *inp = phi->in(j);
 708       if (inp->isa_InlineType()) {
 709         // TODO 8302217 This prevents PhiNode::push_inline_types_through
 710         return nullptr;
 711       }
 712       if (get_ctrl(inp) == proj) { // Found local op
 713         cost++;
 714         // Check for a chain of dependent ops; these will all become
 715         // speculative in a CMOV.
 716         for (uint k = 1; k < inp->req(); k++)
 717           if (get_ctrl(inp->in(k)) == proj)
 718             cost += ConditionalMoveLimit; // Too much speculative goo
 719       }
 720     }
 721     // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
 722     // This will likely Split-If, a higher-payoff operation.
 723     for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
 724       Node* use = phi->fast_out(k);
 725       if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
 726         cost += ConditionalMoveLimit;
 727       // Is there a use inside the loop?
 728       // Note: check only basic types since CMoveP is pinned.
 729       if (!used_inside_loop && is_java_primitive(bt)) {
 730         IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
 731         if (r_loop == u_loop || r_loop->is_member(u_loop)) {
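
A hedged sketch of the shapes this cost model accepts and rejects (illustrative C++; function names are made up):

  // Likely converted to a CMoveI: a short diamond over available values.
  int cmov_friendly(bool c, int a, int b) {
    return c ? a : b;
  }

  // Likely rejected: the arm's whole dependence chain would have to run
  // speculatively ahead of the conditional move, so each such input is
  // charged ConditionalMoveLimit.
  int cmov_hostile(bool c, int a, int b) {
    return c ? (a * a + a * b + b) : b;
  }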

1017             assert(get_loop(lca)->_nest < n_loop->_nest || lca->in(0)->is_NeverBranch(), "must not be moved into inner loop");
1018 
1019             // Move store out of the loop
1020             _igvn.replace_node(hook, n->in(MemNode::Memory));
1021             _igvn.replace_input_of(n, 0, lca);
1022             set_ctrl_and_loop(n, lca);
1023 
1024             // Disconnect the phi now. An empty phi can confuse other
1025             // optimizations in this pass of loop opts.
1026             if (phi->in(LoopNode::LoopBackControl) == phi) {
1027               _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
1028               n_loop->_body.yank(phi);
1029             }
1030           }
1031         }
1032       }
1033     }
1034   }
1035 }
1036 
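A hedged source-level sketch of the store sinking in the hunk above (names are illustrative): a store executed unconditionally on every iteration, whose memory state only feeds the loop's memory Phi, can be moved past the loop, where just the final value is observable.

  // Before: *p is written on every iteration.
  void store_in_loop(int* p, int n) {
    for (int i = 0; i < n; i++) {
      *p = i;                        // only the last store is observable
    }
  }

  // After (conceptually): one store of the final value past the loop.
  void store_after_loop(int* p, int n) {
    if (n > 0) {
      *p = n - 1;
    }
  }
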
1037 // If UseArrayMarkWordCheck is enabled, we can't use immutable memory for the flat array check
1038 // because we are loading the mark word, which is mutable. Although the bits we are interested in
1039 // are immutable (we check for markWord::unlocked_value), we need to use raw memory to not break
1040 // anti-dependency analysis. The code below still attempts to move flat array checks out of loops,
1041 // mainly to enable loop unswitching.
1042 void PhaseIdealLoop::move_flat_array_check_out_of_loop(Node* n) {
1043   // Skip checks for more than one array
1044   if (n->req() > 3) {
1045     return;
1046   }
1047   Node* mem = n->in(FlatArrayCheckNode::Memory);
1048   Node* array = n->in(FlatArrayCheckNode::ArrayOrKlass)->uncast();
1049   IdealLoopTree* check_loop = get_loop(get_ctrl(n));
1050   IdealLoopTree* ary_loop = get_loop(get_ctrl(array));
1051 
1052   // Check if array is loop invariant
1053   if (!check_loop->is_member(ary_loop)) {
1054     // Walk up memory graph from the check until we leave the loop
1055     VectorSet wq;
1056     wq.set(mem->_idx);
1057     while (check_loop->is_member(get_loop(ctrl_or_self(mem)))) {
1058       if (mem->is_Phi()) {
1059         mem = mem->in(1);
1060       } else if (mem->is_MergeMem()) {
1061         mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1062       } else if (mem->is_Proj()) {
1063         mem = mem->in(0);
1064       } else if (mem->is_MemBar() || mem->is_SafePoint()) {
1065         mem = mem->in(TypeFunc::Memory);
1066       } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
1067         mem = mem->in(MemNode::Memory);
1068       } else {
1069 #ifdef ASSERT
1070         mem->dump();
1071 #endif
1072         ShouldNotReachHere();
1073       }
1074       if (wq.test_set(mem->_idx)) {
1075         return;
1076       }
1077     }
1078     // Replace memory input and re-compute ctrl to move the check out of the loop
1079     _igvn.replace_input_of(n, 1, mem);
1080     set_ctrl_and_loop(n, get_early_ctrl(n));
1081     Node* bol = n->unique_out();
1082     set_ctrl_and_loop(bol, get_early_ctrl(bol));
1083   }
1084 }
1085 
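A minimal, self-contained sketch of the memory-graph walk above, assuming a hypothetical MemNodeSketch type in place of C2's Node: follow one memory input at a time until control leaves the loop, with a visited set so a cycle through the loop's memory Phi ends the search.

  #include <unordered_set>

  struct MemNodeSketch {                 // hypothetical stand-in for C2's Node
    MemNodeSketch* mem_input;            // the one memory predecessor we follow
    int idx;
    bool inside_loop;
  };

  MemNodeSketch* first_state_outside_loop(MemNodeSketch* mem) {
    std::unordered_set<int> visited;
    visited.insert(mem->idx);
    while (mem->inside_loop) {
      mem = mem->mem_input;              // step up the memory graph
      if (!visited.insert(mem->idx).second) {
        return nullptr;                  // revisited a node: give up, like the real walk
      }
    }
    return mem;                          // first memory state outside the loop
  }
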
1086 //------------------------------split_if_with_blocks_pre-----------------------
1087 // Do the real work in a non-recursive function.  Data nodes want to be
1088 // cloned in the pre-order so they can feed each other nicely.
1089 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1090   // Cloning these guys is unlikely to win
1091   int n_op = n->Opcode();
1092   if (n_op == Op_MergeMem) {
1093     return n;
1094   }
1095   if (n->is_Proj()) {
1096     return n;
1097   }
1098 
1099   if (UseArrayMarkWordCheck && n->isa_FlatArrayCheck()) {
1100     move_flat_array_check_out_of_loop(n);
1101     return n;
1102   }
1103 
1104   // Do not clone-up CmpFXXX variations, as these are always
1105   // followed by a CmpI
1106   if (n->is_Cmp()) {
1107     return n;
1108   }
1109   // Attempt to use a conditional move instead of a phi/branch
1110   if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1111     Node *cmov = conditional_move( n );
1112     if (cmov) {
1113       return cmov;
1114     }
1115   }
1116   if (n->is_CFG() || n->is_LoadStore()) {
1117     return n;
1118   }
1119   if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
1120     if (!C->major_progress()) {   // If chance of no more loop opts...
1121       _igvn._worklist.push(n);  // maybe we'll remove them
1122     }
1123     return n;

1361 
1362   return true;
1363 }
1364 
1365 // Detect whether the node is the loop end of the inner loop of a strip-mined loop nest.
1366 // Returns null if it is not, otherwise the exit of the outer strip-mined loop.
1367 static Node* is_inner_of_stripmined_loop(const Node* out) {
1368   Node* out_le = nullptr;
1369 
1370   if (out->is_CountedLoopEnd()) {
1371     const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1372 
1373     if (loop != nullptr && loop->is_strip_mined()) {
1374       out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1375     }
1376   }
1377 
1378   return out_le;
1379 }
1380 
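A hedged source-level sketch of the strip-mined loop nest this helper recognizes (the strip length of 1000 is illustrative):

  void strip_mined_shape(int n) {
    for (int i = 0; i < n; ) {                     // OuterStripMinedLoop
      int strip_end = (n - i > 1000) ? i + 1000 : n;
      for (; i < strip_end; i++) {                 // inner CountedLoop, poll-free
        // ... loop body ...
      }
      // a safepoint poll sits here, once per strip, on the outer backedge
    }
  }
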
1381 bool PhaseIdealLoop::flat_array_element_type_check(Node *n) {
1382   // If the CmpP is a subtype check for a value that has just been
1383   // loaded from an array, the subtype check guarantees that the value
1384   // can't be stored in a flat array, and the load of the value
1385   // happens together with a flat array check, then push the type check
1386   // through the phi of the flat array check. This needs special
1387   // logic because the subtype check's input is not a phi but a
1388   // LoadKlass that must first be cloned through the phi.
1389   if (n->Opcode() != Op_CmpP) {
1390     return false;
1391   }
1392 
1393   Node* klassptr = n->in(1);
1394   Node* klasscon = n->in(2);
1395 
1396   if (klassptr->is_DecodeNarrowPtr()) {
1397     klassptr = klassptr->in(1);
1398   }
1399 
1400   if (klassptr->Opcode() != Op_LoadKlass && klassptr->Opcode() != Op_LoadNKlass) {
1401     return false;
1402   }
1403 
1404   if (!klasscon->is_Con()) {
1405     return false;
1406   }
1407 
1408   Node* addr = klassptr->in(MemNode::Address);
1409 
1410   if (!addr->is_AddP()) {
1411     return false;
1412   }
1413 
1414   intptr_t offset;
1415   Node* obj = AddPNode::Ideal_base_and_offset(addr, &_igvn, offset);
1416 
1417   if (obj == nullptr) {
1418     return false;
1419   }
1420 
1421   assert(obj != nullptr && addr->in(AddPNode::Base) == addr->in(AddPNode::Address), "malformed AddP?");
1422   if (obj->Opcode() == Op_CastPP) {
1423     obj = obj->in(1);
1424   }
1425 
1426   if (!obj->is_Phi()) {
1427     return false;
1428   }
1429 
1430   Node* region = obj->in(0);
1431 
1432   Node* phi = PhiNode::make_blank(region, n->in(1));
1433   for (uint i = 1; i < region->req(); i++) {
1434     Node* in = obj->in(i);
1435     Node* ctrl = region->in(i);
1436     if (addr->in(AddPNode::Base) != obj) {
1437       Node* cast = addr->in(AddPNode::Base);
1438       assert(cast->Opcode() == Op_CastPP && cast->in(0) != nullptr, "inconsistent subgraph");
1439       Node* cast_clone = cast->clone();
1440       cast_clone->set_req(0, ctrl);
1441       cast_clone->set_req(1, in);
1442       register_new_node(cast_clone, ctrl);
1443       const Type* tcast = cast_clone->Value(&_igvn);
1444       _igvn.set_type(cast_clone, tcast);
1445       cast_clone->as_Type()->set_type(tcast);
1446       in = cast_clone;
1447     }
1448     Node* addr_clone = addr->clone();
1449     addr_clone->set_req(AddPNode::Base, in);
1450     addr_clone->set_req(AddPNode::Address, in);
1451     register_new_node(addr_clone, ctrl);
1452     _igvn.set_type(addr_clone, addr_clone->Value(&_igvn));
1453     Node* klassptr_clone = klassptr->clone();
1454     klassptr_clone->set_req(2, addr_clone);
1455     register_new_node(klassptr_clone, ctrl);
1456     _igvn.set_type(klassptr_clone, klassptr_clone->Value(&_igvn));
1457     if (klassptr != n->in(1)) {
1458       Node* decode = n->in(1);
1459       assert(decode->is_DecodeNarrowPtr(), "inconsistent subgraph");
1460       Node* decode_clone = decode->clone();
1461       decode_clone->set_req(1, klassptr_clone);
1462       register_new_node(decode_clone, ctrl);
1463       _igvn.set_type(decode_clone, decode_clone->Value(&_igvn));
1464       klassptr_clone = decode_clone;
1465     }
1466     phi->set_req(i, klassptr_clone);
1467   }
1468   register_new_node(phi, region);
1469   Node* orig = n->in(1);
1470   _igvn.replace_input_of(n, 1, phi);
1471   split_if_with_blocks_post(n);
1472   if (n->outcnt() != 0) {
1473     _igvn.replace_input_of(n, 1, orig);
1474     _igvn.remove_dead_node(phi);
1475   }
1476   return true;
1477 }
1478 
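A source-level analog of the rewrite performed by flat_array_element_type_check (illustrative C++, not the actual IR): a load below a merge of two bases is cloned into each arm, so the merge is of loaded values rather than addresses and each path can be type-checked on its own.

  // Before: one load below the merge of two possible bases.
  int load_after_merge(bool flat, int* flat_base, int* ref_base) {
    int* base = flat ? flat_base : ref_base;       // Phi of bases
    return *base;                                  // single load below the Phi
  }

  // After: the load is cloned into each arm; the Phi merges loaded values.
  int load_per_arm(bool flat, int* flat_base, int* ref_base) {
    return flat ? *flat_base : *ref_base;          // Phi(Load, Load)
  }
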
1479 //------------------------------split_if_with_blocks_post----------------------
1480 // Do the real work in a non-recursive function.  CFG hackery wants to be
1481 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1482 // info.
1483 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1484 
1485   if (flat_array_element_type_check(n)) {
1486     return;
1487   }
1488 
1489   // Cloning Cmp through Phi's involves the split-if transform.
1490   // FastLock is not used by an If
1491   if (n->is_Cmp() && !n->is_FastLock()) {
1492     Node *n_ctrl = get_ctrl(n);
1493     // Determine if the Node has inputs from some local Phi.
1494     // Returns the block to clone thru.
1495     Node *n_blk = has_local_phi_input(n);
1496     if (n_blk != n_ctrl) {
1497       return;
1498     }
1499 
1500     if (!can_split_if(n_ctrl)) {
1501       return;
1502     }
1503 
1504     if (n->outcnt() != 1) {
1505       return; // Multiple bool's from 1 compare?
1506     }
1507     Node *bol = n->unique_out();
1508     assert(bol->is_Bool(), "expect a bool here");

1601           Node* out_le = is_inner_of_stripmined_loop(dom);
1602           if (out_le != nullptr) {
1603             prevdom = out_le;
1604           }
1605           // Replace the dominated test with an obvious true or false.
1606           // Place it on the IGVN worklist for later cleanup.
1607           C->set_major_progress();
1608           dominated_by(prevdom->as_IfProj(), n->as_If(), false, true);
1609           DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1610           return;
1611         }
1612         prevdom = dom;
1613         dom = idom(prevdom);
1614       }
1615     }
1616   }
1617 
1618   try_sink_out_of_loop(n);
1619 
1620   try_move_store_after_loop(n);
1621 
1622   // Remove multiple allocations of the same inline type
1623   if (n->is_InlineType()) {
1624     n->as_InlineType()->remove_redundant_allocations(this);
1625   }
1626 }
1627 
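A hedged sketch of the test the dominance walk above eliminates (illustrative C++): an inner test of the same condition is dominated by one projection of the outer test, so dominated_by() replaces it with an obvious constant.

  int nested_same_test(bool c, int x) {
    if (c) {
      x += 1;
      if (c) {       // dominated by the true projection of the outer test;
        x += 2;      // dominated_by() folds it to an always-taken branch
      }
    }
    return x;
  }
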
1628 // Transform:
1629 //
1630 // if (some_condition) {
1631 //   // body 1
1632 // } else {
1633 //   // body 2
1634 // }
1635 // if (some_condition) {
1636 //   // body 3
1637 // } else {
1638 //   // body 4
1639 // }
1640 //
1641 // into:
1642 //
1643 //
1644 // if (some_condition) {
1645 //   // body 1

2039   uint i;
2040   for (i = 1; i < phi->req(); i++) {
2041     Node *b = phi->in(i);
2042     if (b->is_Phi()) {
2043       _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
2044     } else {
2045       assert(b->is_Bool() || b->Opcode() == Op_Opaque4, "");
2046     }
2047   }
2048 
2049   Node* n = phi->in(1);
2050   Node* sample_opaque = nullptr;
2051   Node *sample_bool = nullptr;
2052   if (n->Opcode() == Op_Opaque4) {
2053     sample_opaque = n;
2054     sample_bool = n->in(1);
2055     assert(sample_bool->is_Bool(), "wrong type");
2056   } else {
2057     sample_bool = n;
2058   }
2059   Node* sample_cmp = sample_bool->in(1);
2060   const Type* t = Type::TOP;
2061   const TypePtr* at = nullptr;
2062   if (sample_cmp->is_FlatArrayCheck()) {
2063     // The left input of a FlatArrayCheckNode is memory; set the (adr) type of the phi accordingly
2064     assert(sample_cmp->in(1)->bottom_type() == Type::MEMORY, "unexpected input type");
2065     t = Type::MEMORY;
2066     at = TypeRawPtr::BOTTOM;
2067   }
2068 
2069   // Make Phis to merge the Cmp's inputs.
2070   PhiNode *phi1 = new PhiNode(phi->in(0), t, at);
2071   PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
2072   for (i = 1; i < phi->req(); i++) {
2073     Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
2074     Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
2075     phi1->set_req(i, n1);
2076     phi2->set_req(i, n2);
2077     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2078     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2079   }
2080   // See if these Phis have been made before.
2081   // Register with optimizer
2082   Node *hit1 = _igvn.hash_find_insert(phi1);
2083   if (hit1) {                   // Hit, toss just made Phi
2084     _igvn.remove_dead_node(phi1); // Remove new phi
2085     assert(hit1->is_Phi(), "" );
2086     phi1 = (PhiNode*)hit1;      // Use existing phi
2087   } else {                      // Miss
2088     _igvn.register_new_node_with_optimizer(phi1);
2089   }
2090   Node *hit2 = _igvn.hash_find_insert(phi2);
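
The same hash-consing idiom, factored into a hedged helper (a hypothetical function, not one C2 defines) to show the lookup-or-register pattern used for phi1 and phi2:

  // Hypothetical helper, built only from calls that appear in this file.
  static PhiNode* find_or_register(PhaseIterGVN& igvn, PhiNode* phi) {
    Node* hit = igvn.hash_find_insert(phi);    // inserts phi if no equal node exists
    if (hit != nullptr) {                      // hit: toss the just-made Phi
      igvn.remove_dead_node(phi);
      return hit->as_Phi();                    // reuse the canonical copy
    }
    igvn.register_new_node_with_optimizer(phi);
    return phi;
  }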