 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/loopnode.hpp"
#include "opto/matcher.hpp"
#include "opto/mulnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
#include "opto/subtypenode.hpp"
#include "utilities/macros.hpp"

//=============================================================================
//------------------------------split_thru_phi---------------------------------
// Split Node 'n' through merge point if there is enough win.
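//
// A schematic sketch for illustration (simplified node shapes, not actual
// IR dumps): splitting
//   AddI(Phi(region, a, b), c)
// through the Phi yields
//   Phi(region, AddI(a, c), AddI(b, c))
// which is a win if enough of the cloned nodes fold away on their paths.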
Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
    // ConvI2L may have type information on it which is unsafe to push up,
    // so disable this for now.
    return NULL;
  }

  // Splitting range-check CastIIs through a loop induction Phi can create
  // new Phis that are unrelated to the loop induction Phi and prevent
  // optimizations such as vectorization.
  if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
      n->in(1) == region->as_CountedLoop()->phi()) {
    return NULL;
  }

  // Inline types should not be split through Phis because they cannot be
  // merged through Phi nodes; instead, each value input needs to be merged
  // individually.
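  // (A schematic sketch, simplified: an InlineType node carries one input
  // per field, so a merge must have the shape
  //   InlineType(Phi(region, f1, f1'), Phi(region, f2, f2'))
  // rather than Phi(region, InlineType(f1, f2), InlineType(f1', f2')).)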
  if (n->is_InlineType()) {
    return NULL;
  }

  if (cannot_split_division(n, region)) {
    return NULL;
  }

  int wins = 0;
  assert(!n->is_CFG(), "");
  assert(region->is_Region(), "");

  const Type* type = n->bottom_type();
  const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
  Node* phi;
  if (t_oop != NULL && t_oop->is_known_instance_field()) {
    int iid = t_oop->instance_id();
    int index = C->get_alias_index(t_oop);
    int offset = t_oop->offset();
    phi = new PhiNode(region, type, NULL, iid, index, offset);
  } else {
    phi = PhiNode::make_blank(region, n);
  }
  uint old_unique = C->unique();
          assert(get_loop(lca)->_nest < n_loop->_nest || lca->in(0)->is_NeverBranch(), "must not be moved into inner loop");

          // Move store out of the loop
          _igvn.replace_node(hook, n->in(MemNode::Memory));
          _igvn.replace_input_of(n, 0, lca);
          set_ctrl_and_loop(n, lca);

          // Disconnect the phi now. An empty phi can confuse other
          // optimizations in this pass of loop opts.
          if (phi->in(LoopNode::LoopBackControl) == phi) {
            _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
            n_loop->_body.yank(phi);
          }
        }
      }
    }
  }
}

// If UseArrayMarkWordCheck is enabled, we can't use immutable memory for the
// flat array check because we are loading the mark word, which is mutable.
// Although the bits we are interested in are immutable (we check for
// markWord::unlocked_value), we need to use raw memory to not break
// anti-dependency analysis. The code below still attempts to move flat array
// checks out of loops, mainly to enable loop unswitching.
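//
// A schematic sketch of the rewrite (simplified; mem' stands for the memory
// state reaching the loop from outside):
//
//   loop:  FlatArrayCheck(mem_phi, array)   // array is loop-invariant
//
// is rewired to
//
//   FlatArrayCheck(mem', array)             // ctrl re-computed to before the loop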
void PhaseIdealLoop::move_flat_array_check_out_of_loop(Node* n) {
  // Skip checks for more than one array
  if (n->req() > 3) {
    return;
  }
  Node* mem = n->in(FlatArrayCheckNode::Memory);
  Node* array = n->in(FlatArrayCheckNode::ArrayOrKlass)->uncast();
  IdealLoopTree* check_loop = get_loop(get_ctrl(n));
  IdealLoopTree* ary_loop = get_loop(get_ctrl(array));

  // Check if array is loop invariant
  if (!check_loop->is_member(ary_loop)) {
    // Walk up memory graph from the check until we leave the loop
    VectorSet wq;
    wq.set(mem->_idx);
    while (check_loop->is_member(get_loop(ctrl_or_self(mem)))) {
      if (mem->is_Phi()) {
        mem = mem->in(1);
      } else if (mem->is_MergeMem()) {
        mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
      } else if (mem->is_Proj()) {
        mem = mem->in(0);
      } else if (mem->is_MemBar() || mem->is_SafePoint()) {
        mem = mem->in(TypeFunc::Memory);
      } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
        mem = mem->in(MemNode::Memory);
      } else {
#ifdef ASSERT
        mem->dump();
#endif
        ShouldNotReachHere();
      }
      if (wq.test_set(mem->_idx)) {
        return;
      }
    }
    // Replace memory input and re-compute ctrl to move the check out of the loop
    _igvn.replace_input_of(n, 1, mem);
    set_ctrl_and_loop(n, get_early_ctrl(n));
    Node* bol = n->unique_out();
    set_ctrl_and_loop(bol, get_early_ctrl(bol));
  }
}

//------------------------------split_if_with_blocks_pre-----------------------
// Do the real work in a non-recursive function. Data nodes want to be
// cloned in the pre-order so they can feed each other nicely.
Node* PhaseIdealLoop::split_if_with_blocks_pre(Node* n) {
  // Cloning these guys is unlikely to win
  int n_op = n->Opcode();
  if (n_op == Op_MergeMem) {
    return n;
  }
  if (n->is_Proj()) {
    return n;
  }

  if (UseArrayMarkWordCheck && n->isa_FlatArrayCheck()) {
    move_flat_array_check_out_of_loop(n);
    return n;
  }

  // Do not clone-up CmpFXXX variations, as these are always
  // followed by a CmpI
  if (n->is_Cmp()) {
    return n;
  }
  // Attempt to use a conditional move instead of a phi/branch
  if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
    Node* cmov = conditional_move(n);
    if (cmov) {
      return cmov;
    }
  }
  if (n->is_CFG() || n->is_LoadStore()) {
    return n;
  }
  if (n->is_Opaque1()) {        // Opaque nodes cannot be mod'd
    if (!C->major_progress()) { // If chance of no more loop opts...
      _igvn._worklist.push(n);  // maybe we'll remove them
    }
    return n;

  return true;
}

// Detect if the node is the loop end of the inner loop of a strip-mined
// loop. Returns NULL if it is not, otherwise the exit of the outer
// strip-mined loop.
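//
// Schematically, the shape expected here (a simplified sketch):
//
//      OuterStripMinedLoop
//              |
//        CountedLoop <------+
//              |            |
//             ...           |
//              |            |
//       CountedLoopEnd -----+
//              |
//    OuterStripMinedLoopEnd
//              |
//       outer_loop_exit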
static Node* is_inner_of_stripmined_loop(const Node* out) {
  Node* out_le = NULL;

  if (out->is_CountedLoopEnd()) {
    const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();

    if (loop != NULL && loop->is_strip_mined()) {
      out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
    }
  }

  return out_le;
}

bool PhaseIdealLoop::flatten_array_element_type_check(Node* n) {
  // If the CmpP is a subtype check for a value that was just loaded from an
  // array, the subtype check guarantees that the value can't be stored in a
  // flattened array, and the load of the value happens with a flattened
  // array check, then push the type check through the phi of the flattened
  // array check. This needs special logic because the subtype check's input
  // is not a phi but a LoadKlass that must first be cloned through the phi.
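  //
  // A schematic sketch of the expected shape and the rewrite (simplified;
  // the DecodeN and CastPP nodes may or may not be present):
  //
  //   CmpP(LoadKlass(AddP(CastPP(Phi(region, in1, in2)))), klasscon)
  //
  // becomes
  //
  //   CmpP(Phi(region, LoadKlass(AddP(CastPP(in1))),
  //                    LoadKlass(AddP(CastPP(in2)))), klasscon)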
  if (n->Opcode() != Op_CmpP) {
    return false;
  }

  Node* klassptr = n->in(1);
  Node* klasscon = n->in(2);

  if (klassptr->is_DecodeNarrowPtr()) {
    klassptr = klassptr->in(1);
  }

  if (klassptr->Opcode() != Op_LoadKlass && klassptr->Opcode() != Op_LoadNKlass) {
    return false;
  }

  if (!klasscon->is_Con()) {
    return false;
  }

  Node* addr = klassptr->in(MemNode::Address);

  if (!addr->is_AddP()) {
    return false;
  }

  intptr_t offset;
  Node* obj = AddPNode::Ideal_base_and_offset(addr, &_igvn, offset);

  if (obj == NULL) {
    return false;
  }

  assert(addr->in(AddPNode::Base) == addr->in(AddPNode::Address), "malformed AddP?");
  if (obj->Opcode() == Op_CastPP) {
    obj = obj->in(1);
  }

  if (!obj->is_Phi()) {
    return false;
  }

  Node* region = obj->in(0);

  // Clone the CastPP/AddP/LoadKlass (and DecodeN, if present) chain down
  // each path of the Phi and merge the results in a new Phi.
  Node* phi = PhiNode::make_blank(region, n->in(1));
  for (uint i = 1; i < region->req(); i++) {
    Node* in = obj->in(i);
    Node* ctrl = region->in(i);
    if (addr->in(AddPNode::Base) != obj) {
      Node* cast = addr->in(AddPNode::Base);
      assert(cast->Opcode() == Op_CastPP && cast->in(0) != NULL, "inconsistent subgraph");
      Node* cast_clone = cast->clone();
      cast_clone->set_req(0, ctrl);
      cast_clone->set_req(1, in);
      register_new_node(cast_clone, ctrl);
      const Type* tcast = cast_clone->Value(&_igvn);
      _igvn.set_type(cast_clone, tcast);
      cast_clone->as_Type()->set_type(tcast);
      in = cast_clone;
    }
    Node* addr_clone = addr->clone();
    addr_clone->set_req(AddPNode::Base, in);
    addr_clone->set_req(AddPNode::Address, in);
    register_new_node(addr_clone, ctrl);
    _igvn.set_type(addr_clone, addr_clone->Value(&_igvn));
    Node* klassptr_clone = klassptr->clone();
    klassptr_clone->set_req(2, addr_clone); // 2 == MemNode::Address
    register_new_node(klassptr_clone, ctrl);
    _igvn.set_type(klassptr_clone, klassptr_clone->Value(&_igvn));
    if (klassptr != n->in(1)) {
      Node* decode = n->in(1);
      assert(decode->is_DecodeNarrowPtr(), "inconsistent subgraph");
      Node* decode_clone = decode->clone();
      decode_clone->set_req(1, klassptr_clone);
      register_new_node(decode_clone, ctrl);
      _igvn.set_type(decode_clone, decode_clone->Value(&_igvn));
      klassptr_clone = decode_clone;
    }
    phi->set_req(i, klassptr_clone);
  }
  register_new_node(phi, region);
  Node* orig = n->in(1);
  _igvn.replace_input_of(n, 1, phi);
  split_if_with_blocks_post(n);
  // If the CmpP was not split through the new Phi, undo the transformation.
  if (n->outcnt() != 0) {
    _igvn.replace_input_of(n, 1, orig);
    _igvn.remove_dead_node(phi);
  }
  return true;
}

//------------------------------split_if_with_blocks_post----------------------
// Do the real work in a non-recursive function. CFG hackery wants to be
// in the post-order, so it can dirty the I-DOM info and not use the dirtied
// info.
void PhaseIdealLoop::split_if_with_blocks_post(Node* n) {

  if (flatten_array_element_type_check(n)) {
    return;
  }

  // Cloning Cmp through Phi's involves the split-if transform.
  // FastLock is not used by an If
  if (n->is_Cmp() && !n->is_FastLock()) {
    Node* n_ctrl = get_ctrl(n);
    // Determine if the Node has inputs from some local Phi.
    // Returns the block to clone thru.
    Node* n_blk = has_local_phi_input(n);
    if (n_blk != n_ctrl) {
      return;
    }

    if (!can_split_if(n_ctrl)) {
      return;
    }

    if (n->outcnt() != 1) {
      return; // Multiple bool's from 1 compare?
    }
    Node* bol = n->unique_out();
    assert(bol->is_Bool(), "expect a bool here");
            prevdom = out_le;
          }
          // Replace the dominated test with an obvious true or false.
          // Place it on the IGVN worklist for later cleanup.
          C->set_major_progress();
          dominated_by(prevdom->as_IfProj(), n->as_If(), false, true);
#ifndef PRODUCT
          if (VerifyLoopOptimizations) verify();
#endif
          return;
        }
        prevdom = dom;
        dom = idom(prevdom);
      }
    }
  }

  try_sink_out_of_loop(n);

  try_move_store_after_loop(n);

  // Remove multiple allocations of the same inline type
  if (n->is_InlineType()) {
    n->as_InlineType()->remove_redundant_allocations(this);
  }
}

// Transform:
//
// if (some_condition) {
//   // body 1
// } else {
//   // body 2
// }
// if (some_condition) {
//   // body 3
// } else {
//   // body 4
// }
//
// into:
//
//
// if (some_condition) {
//   // body 1
  uint i;
  for (i = 1; i < phi->req(); i++) {
    Node* b = phi->in(i);
    if (b->is_Phi()) {
      _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
    } else {
      assert(b->is_Bool() || b->Opcode() == Op_Opaque4, "");
    }
  }

  Node* n = phi->in(1);
  Node* sample_opaque = NULL;
  Node* sample_bool = NULL;
  if (n->Opcode() == Op_Opaque4) {
    sample_opaque = n;
    sample_bool = n->in(1);
    assert(sample_bool->is_Bool(), "wrong type");
  } else {
    sample_bool = n;
  }
  Node* sample_cmp = sample_bool->in(1);
  const Type* t = Type::TOP;
  const TypePtr* at = NULL;
  if (sample_cmp->is_FlatArrayCheck()) {
    // The left input of a FlatArrayCheckNode is memory; set the (adr) type of the phi accordingly.
    assert(sample_cmp->in(1)->bottom_type() == Type::MEMORY, "unexpected input type");
    t = Type::MEMORY;
    at = TypeRawPtr::BOTTOM;
  }

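  // A schematic sketch of the merge built below (simplified shapes):
  //   Phi(region, Bool(Cmp(a1, a2)), Bool(Cmp(b1, b2)))
  // is rebuilt as
  //   Bool(Cmp(Phi(region, a1, b1), Phi(region, a2, b2)))
  // so a single test can replace the per-path tests.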
  // Make Phis to merge the Cmp's inputs.
  PhiNode* phi1 = new PhiNode(phi->in(0), t, at);
  PhiNode* phi2 = new PhiNode(phi->in(0), Type::TOP);
  for (i = 1; i < phi->req(); i++) {
    Node* n1 = sample_opaque == NULL ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
    Node* n2 = sample_opaque == NULL ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
    phi1->set_req(i, n1);
    phi2->set_req(i, n2);
    phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
    phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
  }
  // See if these Phis have been made before.
  // Register with optimizer
  Node* hit1 = _igvn.hash_find_insert(phi1);
  if (hit1) {                     // Hit, toss just made Phi
    _igvn.remove_dead_node(phi1); // Remove new phi
    assert(hit1->is_Phi(), "");
    phi1 = (PhiNode*)hit1;        // Use existing phi
  } else {                        // Miss
    _igvn.register_new_node_with_optimizer(phi1);
  }
  Node* hit2 = _igvn.hash_find_insert(phi2);