 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/movenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/regalloc.hpp"
#include "opto/regmask.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/vectornode.hpp"
#include "utilities/vmError.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
//------------------------------Value------------------------------------------
// Compute the type of the RegionNode.

// ...

  if (left_path == nullptr || right_path == nullptr) {
    return false;
  }
  Node* diamond_if = left_path->in(0);
  if (diamond_if == nullptr || !diamond_if->is_If() || diamond_if != right_path->in(0)) {
    // Not an IfNode merging a diamond or TOP.
    return false;
  }

  // Check for a proper bool/cmp
  const Node* bol = diamond_if->in(1);
  if (!bol->is_Bool()) {
    return false;
  }
  const Node* cmp = bol->in(1);
  if (!cmp->is_Cmp()) {
    return false;
  }
  return true;
}
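
// A sketch of the diamond shape checked above (naming as in the code):
//
//        diamond_if
//         /      \
//   left_path   right_path
//         \      /
//        this Region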

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Must preserve
// the CFG, but we can still strip out dead paths.
Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if( !can_reshape && !in(0) ) return nullptr; // Already degraded to a Copy
  assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");

  // Check for a RegionNode with no Phi users whose inputs come from the two
  // arms of the same If: if found, the control-flow split is useless.
  bool has_phis = false;
  if (can_reshape) {            // Need DU info to check for Phi users
    try_clean_mem_phis(phase->is_IterGVN());
    has_phis = (has_phi() != nullptr);       // Cache result

    if (!has_phis) {            // No Phi users?  Nothing merging?
      for (uint i = 1; i < req()-1; i++) {
        Node *if1 = in(i);
        if( !if1 ) continue;
        Node *iff = if1->in(0);
        if( !iff || !iff->is_If() ) continue;
// ...

  if (iff1 == iff2) {
    igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
    igvn->replace_input_of(region, idx1, iff1->in(0));
    igvn->replace_input_of(region, idx2, igvn->C->top());
    return (region == this); // Remove useless if (both projections map to the same control/value)
  }
  BoolNode* bol1 = iff1->in(1)->isa_Bool();
  BoolNode* bol2 = iff2->in(1)->isa_Bool();
  if (bol1 == nullptr || bol2 == nullptr) {
    return false; // No bool inputs found
  }
  Node* cmp1 = bol1->in(1);
  Node* cmp2 = bol2->in(1);
  bool commute = false;
  if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
    return false; // No comparison
  } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
             cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
             cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
             cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
             cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck() ||
             cmp1->is_FlatArrayCheck() || cmp2->is_FlatArrayCheck()) {
    // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
    // SubTypeCheck and FlatArrayCheck are not commutative.
    return false;
  } else if (cmp1 != cmp2) {
    if (cmp1->in(1) == cmp2->in(2) &&
        cmp1->in(2) == cmp2->in(1)) {
      commute = true; // Same but swapped inputs, commute the test
    } else {
      return false; // Ifs are not comparing the same values
    }
  }
  proj1 = proj1->other_if_proj();
  proj2 = proj2->other_if_proj();
  if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
         proj2->unique_ctrl_out_or_null() == this) ||
        (proj2->unique_ctrl_out_or_null() == iff1 &&
         proj1->unique_ctrl_out_or_null() == this))) {
    return false; // Ifs are not connected through other projs
  }
  // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged
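  // (A sketch: both Ifs test the same condition, and the second If hangs off
  // one projection of the first, so the second test is redundant and the
  // merge can be simplified.)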
// ...

      st->print("#reducible ");
      break;
    case RegionNode::LoopStatus::NeverIrreducibleEntry:
      break; // nothing
  }
}
#endif

// Find the one non-null required input.  RegionNode only
Node *Node::nonnull_req() const {
  assert( is_Region(), "" );
  for( uint i = 1; i < _cnt; i++ )
    if( in(i) )
      return in(i);
  ShouldNotReachHere();
  return nullptr;
}


//=============================================================================
// note that these functions assume that the _adr_type field is flat
uint PhiNode::hash() const {
  const Type* at = _adr_type;
  return TypeNode::hash() + (at ? at->hash() : 0);
}
bool PhiNode::cmp( const Node &n ) const {
  return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
}
static inline
const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
  if (at == nullptr || at == TypePtr::BOTTOM)  return at;
  return Compile::current()->alias_type(at)->adr_type();
}

//----------------------------make---------------------------------------------
// create a new phi with edges matching r and set (initially) to x
PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
  uint preds = r->req();   // Number of predecessor paths
  assert(t != Type::MEMORY || at == flatten_phi_adr_type(at) || (flatten_phi_adr_type(at) == TypeAryPtr::INLINES && Compile::current()->flat_accesses_share_alias()), "flatten at");
  PhiNode* p = new PhiNode(r, t, at);
  for (uint j = 1; j < preds; j++) {
    // Fill in all inputs, except those which the region does not yet have
    if (r->in(j) != nullptr)
      p->init_req(j, x);
  }
  return p;
}
PhiNode* PhiNode::make(Node* r, Node* x) {
  const Type* t = x->bottom_type();
  const TypePtr* at = nullptr;
  if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
  return make(r, x, t, at);
}
PhiNode* PhiNode::make_blank(Node* r, Node* x) {
  const Type* t = x->bottom_type();
  const TypePtr* at = nullptr;
  if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
  return new PhiNode(r, t, at);
}
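
// Example usage (a sketch): given a Region r and an initial value x,
//   PhiNode::make(r, x)       fills every data input that r already has with x;
//   PhiNode::make_blank(r, x) only borrows x's type and leaves the data inputs
//                             for the caller to fill in.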
// ...

      np->as_Phi()->verify_adr_type(visited, at);
    } else if (n->bottom_type() == Type::TOP
               || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
      // ignore top inputs
    } else {
      const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
      // recheck phi/non-phi consistency at leaves:
      assert((nat != nullptr) == (at != nullptr), "");
      assert(nat == at || nat == TypePtr::BOTTOM,
             "adr_type must be consistent at leaves of phi nest");
    }
  }
}

// Verify a whole nest of phis rooted at this one.
void PhiNode::verify_adr_type(bool recursive) const {
  if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
  if (Node::in_dump())  return;  // muzzle asserts when printing

  assert((_type == Type::MEMORY) == (_adr_type != nullptr), "adr_type for memory phis only");
  // Flat array elements shouldn't get their own memory slice until flat_accesses_share_alias is cleared.
  // It could be that the graph has no loads/stores and flat_accesses_share_alias is never cleared. EA could
  // still create per-element Phis, but that wouldn't be a problem as there are no memory accesses for that array.
  assert(_adr_type == nullptr || _adr_type->isa_aryptr() == nullptr ||
         _adr_type->is_aryptr()->is_known_instance() ||
         !_adr_type->is_aryptr()->is_flat() ||
         !Compile::current()->flat_accesses_share_alias() ||
         _adr_type == TypeAryPtr::INLINES, "flat array element shouldn't get its own slice yet");

  if (!VerifyAliases)  return;  // verify thoroughly only if requested

  assert(_adr_type == flatten_phi_adr_type(_adr_type),
         "Phi::adr_type must be pre-normalized");

  if (recursive) {
    VectorSet visited;
    verify_adr_type(visited, _adr_type);
  }
}
#endif


//------------------------------Value------------------------------------------
// Compute the type of the PhiNode
const Type* PhiNode::Value(PhaseGVN* phase) const {
  Node *r = in(0);        // RegionNode
  if( !r )                // Copy or dead
    return in(1) ? phase->type(in(1)) : Type::TOP;
// ...

  assert(req() == 3, "same as region");
  RegionNode* region = in(0)->as_Region();
  for (uint i = 1; i < 3; i++) {
    Node* phi_input = in(i);
    if (phi_input != nullptr && phi_input->is_MergeMem() && region->in(i)->outcnt() == 1) {
      // Nothing is control-dependent on path #i except the region itself.
      MergeMemNode* merge_mem = phi_input->as_MergeMem();
      uint j = 3 - i;
      Node* other_phi_input = in(j);
      if (other_phi_input != nullptr && other_phi_input == merge_mem->base_memory() && !is_data_loop(region, phi_input, igvn)) {
        // merge_mem is a successor memory to other_phi_input, and is not pinned inside the diamond, so push it
        // out. Only proceed if the transformation doesn't create a data loop. This will allow the diamond to
        // collapse completely if there are no other phis left.
        igvn->replace_node(this, merge_mem);
        return true;
      }
    }
  }
  return false;
}
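
// A sketch of the shape handled above: a diamond merging Phi(mem, MergeMem(...))
// where the MergeMem's base memory is the other phi input 'mem'; since nothing
// else depends on that path, the Phi collapses to the MergeMem itself.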

//----------------------------check_cmove_id-----------------------------------
// Check for CMove'ing a constant after comparing against the constant.
// Happens all the time now, since if we compare equality vs a constant in
// the parser, we "know" the variable is constant on one path and we force
// it.  Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
// conditional move: "x = (x==0)?0:x;".  Yucko.  This fix is slightly more
// general in that we don't need constants.  Since CMove's are only inserted
// in very special circumstances, we do it here on generic Phi's.
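//
// A worked instance (a sketch): a Phi merging 0 on the true path of "x==0"
// and x on the false path is simply x, since x is known to be zero exactly
// where the constant is selected.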
Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) {
  assert(true_path !=0, "only diamond shape graph expected");

  // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
  // phi->region->if_proj->ifnode->bool->cmp
  Node* region = in(0);
  Node* iff = region->in(1)->in(0);
  BoolNode* b = iff->in(1)->as_Bool();
  Node* cmp = b->in(1);
  Node* tval = in(true_path);
  Node* fval = in(3-true_path);
  Node* id = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b);
// ...

  }

  return id;
}

//------------------------------Identity---------------------------------------
// Check for Region being Identity.
Node* PhiNode::Identity(PhaseGVN* phase) {
  if (must_wait_for_region_in_irreducible_loop(phase)) {
    return this;
  }
  // Check for no merging going on
  // (There used to be special-case code here when this->region->is_Loop.
  // It would check for a tributary phi on the backedge that reduced to the
  // main phi trivially, perhaps through a single cast. The unique_input
  // method does all this and more, by reducing such tributaries to 'this'.)
  Node* uin = unique_input(phase, false);
  if (uin != nullptr) {
    return uin;
  }
  uin = unique_constant_input_recursive(phase);
  if (uin != nullptr) {
    return uin;
  }

  int true_path = is_diamond_phi();
  // Delay CMove'ing identity if Ideal has not had the chance to handle unsafe cases, yet.
  if (true_path != 0 && !(phase->is_IterGVN() && wait_for_region_igvn(phase))) {
    Node* id = is_cmove_id(phase, true_path);
    if (id != nullptr) {
      return id;
    }
  }

  // Looking for phis with identical inputs.  If we find one that has
  // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
  if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
      TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
    uint phi_len = req();
    Node* phi_reg = region();
    for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
      Node* u = phi_reg->fast_out(i);
      if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY &&
          u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg &&
// ...

    }
    // Check for a unique input (maybe uncasted)
    if (input == nullptr) {
      input = un;
    } else if (input != un) {
      input = NodeSentinel; // no unique input
    }
  }
  if (input == nullptr) {
    return phase->C->top(); // no inputs
  }

  if (input != NodeSentinel) {
    return input;           // one unique direct input
  }

  // Nothing.
  return nullptr;
}

// Find the unique constant input, looking recursively through input Phis.
Node* PhiNode::unique_constant_input_recursive(PhaseGVN* phase) {
  if (!phase->is_IterGVN()) {
    return nullptr;
  }

  ResourceMark rm;
  Node* unique = nullptr;
  Unique_Node_List visited;
  visited.push(this);

  for (uint visited_idx = 0; visited_idx < visited.size(); visited_idx++) {
    Node* current = visited.at(visited_idx);
    for (uint i = 1; i < current->req(); i++) {
      Node* phi_in = current->in(i);
      if (phi_in == nullptr) {
        continue;
      }

      if (phi_in->is_Phi()) {
        visited.push(phi_in);
      } else {
        if (unique == nullptr) {
          if (!phi_in->is_Con()) {
            return nullptr;
          }
          unique = phi_in;
        } else if (unique != phi_in) {
          return nullptr;
        }
      }
    }
  }
  return unique;
}
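
// Example (a sketch): with p1 = Phi(p2, ConI 7) and p2 = Phi(p1, ConI 7), both
// phis reduce to ConI 7; cycles are handled because each Phi is visited at
// most once.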

//------------------------------is_x2logic-------------------------------------
// Check for simple convert-to-boolean pattern
// If:(C Bool) Region:(IfF IfT) Phi:(Region 0 1)
// Convert the Phi to a Conv2B.
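// e.g., 'b = (x == 0) ? 0 : 1' becomes Conv2B(x); the flipped variant
// additionally XORs the result with 1 (a sketch of the shapes matched here).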
static Node *is_x2logic( PhaseGVN *phase, PhiNode *phi, int true_path ) {
  assert(true_path !=0, "only diamond shape graph expected");

  // If we're late in the optimization process, we may have already expanded Conv2B nodes
  if (phase->C->post_loop_opts_phase() && !Matcher::match_rule_supported(Op_Conv2B)) {
    return nullptr;
  }

  // Convert the true/false index into an expected 0/1 return.
  // Map 2->0 and 1->1.
  int flipped = 2-true_path;

  // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
  // phi->region->if_proj->ifnode->bool->cmp
  Node *region = phi->in(0);
  Node *iff = region->in(1)->in(0);
// ...

    if (rc->in(0)->in(1) == nullptr || !rc->in(0)->in(1)->is_Bool()) { continue; }
    if (worklist.member(rc->in(0)->in(1))) {
      delay = true;
      break;
    }

    if (rc->in(0)->in(1)->in(1) == nullptr || !rc->in(0)->in(1)->in(1)->is_Cmp()) { continue; }
    if (worklist.member(rc->in(0)->in(1)->in(1))) {
      delay = true;
      break;
    }
  }

  if (delay) {
    worklist.push(this);
  }
  return delay;
}

// Push inline type input nodes (and null) down through the phi recursively (can handle data loops).
InlineTypeNode* PhiNode::push_inline_types_down(PhaseGVN* phase, bool can_reshape, ciInlineKlass* inline_klass) {
  assert(inline_klass != nullptr, "must be");
  InlineTypeNode* vt = InlineTypeNode::make_null(*phase, inline_klass, /* transform = */ false)->clone_with_phis(phase, in(0), nullptr, !_type->maybe_null(), true);
  if (can_reshape) {
    // Replace phi right away to be able to use the inline
    // type node when reaching the phi again through data loops.
    PhaseIterGVN* igvn = phase->is_IterGVN();
    for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
      Node* u = fast_out(i);
      igvn->rehash_node_delayed(u);
      imax -= u->replace_edge(this, vt);
      --i;
    }
    igvn->rehash_node_delayed(this);
    assert(outcnt() == 0, "should be dead now");
  }
  ResourceMark rm;
  Node_List casts;
  for (uint i = 1; i < req(); ++i) {
    Node* n = in(i);
    while (n->is_ConstraintCast()) {
      casts.push(n);
      n = n->in(1);
    }
    if (phase->type(n)->is_zero_type()) {
      n = InlineTypeNode::make_null(*phase, inline_klass);
    } else if (n->is_Phi()) {
      assert(can_reshape, "can only handle phis during IGVN");
      n = phase->transform(n->as_Phi()->push_inline_types_down(phase, can_reshape, inline_klass));
    }
    while (casts.size() != 0) {
      // Push the cast(s) through the InlineTypeNode
      // TODO 8302217 Can we avoid cloning? See InlineTypeNode::clone_if_required
      Node* cast = casts.pop()->clone();
      cast->set_req_X(1, n->as_InlineType()->get_oop(), phase);
      n = n->clone();
      n->as_InlineType()->set_oop(*phase, phase->transform(cast));
      n = phase->transform(n);
      if (n->is_top()) {
        break;
      }
    }
    bool transform = !can_reshape && (i == (req()-1)); // Transform phis on last merge
    assert(n->is_top() || n->is_InlineType(), "Only InlineType or top at this point.");
    if (n->is_InlineType()) {
      vt->merge_with(phase, n->as_InlineType(), i, transform);
    } // else nothing to do: phis above vt created by clone_with_phis are initialized to top already.
  }
  return vt;
}

// If the Phi's Region is in an irreducible loop, and the Region
// has had an input removed, but not yet transformed, it could be
// that the Region (and this Phi) are not reachable from Root.
// If we allow the Phi to collapse before the Region, this may lead
// to dead-loop data. Wait for the Region to check for reachability,
// and potentially remove the dead code.
bool PhiNode::must_wait_for_region_in_irreducible_loop(PhaseGVN* phase) const {
  RegionNode* region = in(0)->as_Region();
  if (region->loop_status() == RegionNode::LoopStatus::MaybeIrreducibleEntry) {
    Node* top = phase->C->top();
    for (uint j = 1; j < req(); j++) {
      Node* rc = region->in(j); // for each control input
      if (rc == nullptr || phase->type(rc) == Type::TOP) {
        // Region is missing a control input
        Node* n = in(j);
        if (n != nullptr && n != top) {
          // Phi still has its input, so region just lost its input
          return true;
        }
      }
// ...

    //     Phi (this)   |
    //         |        |
    //         +--------+
    //
    // Generally, there are issues with non-termination with such circularity
    // (see comment further below). However, if there is a direct loop to self,
    // splitting the Phi through the MergeMem will result in the below.
    //
    //                 +---+
    //                 |   |
    //                 v   |
    //                Phi  |
    //                |\   |
    //                | +--+
    //  (base_memory) v
    //             MergeMem
    //
    // This split breaks the circularity and consequently does not lead to
    // non-termination.
    uint merge_width = 0;
    // TODO revisit this with JDK-8247216
    bool mergemem_only = true;
    bool split_always_terminates = false; // Is splitting guaranteed to terminate?
    for( uint i=1; i<req(); ++i ) { // For all paths in
      Node *ii = in(i);
      // TOP inputs should not be counted as safe inputs because if the
      // Phi references itself through all other inputs then splitting the
      // Phi through memory merges would create a dead loop at a later stage.
      if (ii == top) {
        return nullptr; // Delay optimization until graph is cleaned.
      }
      if (ii->is_MergeMem()) {
        MergeMemNode* n = ii->as_MergeMem();
        merge_width = MAX2(merge_width, n->req());
        if (n->base_memory() == this) {
          split_always_terminates = true;
        }
      } else {
        mergemem_only = false;
      }
    }

    // There are cases with circular dependencies between bottom Phis
    // and MergeMems. Below is a minimal example.
    //
    //                  +------------+
    //                  |            |
    //   (base_memory)  v            |
    //              MergeMem         |
    //                  |            |
    //                  v            |
    //              Phi (this)       |
    //                  |            |
    //                  v            |
    //                 Phi           |
    //                  |            |
    //                  +------------+
    //
    // Here, we cannot break the circularity through a self-loop as there
    // are two Phis involved. Repeatedly splitting the Phis through the
    // MergeMem leads to non-termination. We check for non-termination below.
    // Only check for non-termination if necessary.
    if (!mergemem_only && !split_always_terminates && adr_type() == TypePtr::BOTTOM &&
        merge_width > Compile::AliasIdxRaw) {
      split_always_terminates = is_split_through_mergemem_terminating();
    }

    if (merge_width > Compile::AliasIdxRaw) {
      // found at least one non-empty MergeMem
      const TypePtr* at = adr_type();
      if (at != TypePtr::BOTTOM) {
        // Patch the existing phi to select an input from the merge:
        // Phi:AT1(...MergeMem(m0, m1, m2)...) into
        //     Phi:AT1(...m1...)
        int alias_idx = phase->C->get_alias_index(at);
        for (uint i=1; i<req(); ++i) {
          Node *ii = in(i);
          if (ii->is_MergeMem()) {
            MergeMemNode* n = ii->as_MergeMem();
            // compress paths and change unreachable cycles to TOP
            // If not, we can update the input infinitely along a MergeMem cycle
            // Equivalent code is in MemNode::Ideal_common
            Node *m = phase->transform(n);
            if (outcnt() == 0) {  // Above transform() may kill us!
              return top;
            }
            // If transformed to a MergeMem, get the desired slice
            // Otherwise the returned node represents memory for every slice
            Node *new_mem = (m->is_MergeMem()) ?
              m->as_MergeMem()->memory_at(alias_idx) : m;
            // Update input if it is progress over what we have now
            if (new_mem != ii) {
              set_req_X(i, new_mem, phase->is_IterGVN());
              progress = this;
            }
          }
        }
      } else if (mergemem_only || split_always_terminates) {
        // If all inputs reference this phi (directly or through data nodes) -
        // it is a dead loop.
        bool saw_safe_input = false;
        for (uint j = 1; j < req(); ++j) {
          Node* n = in(j);
          if (n->is_MergeMem()) {
            MergeMemNode* mm = n->as_MergeMem();
            if (mm->base_memory() == this || mm->base_memory() == mm->empty_memory()) {
              // Skip this input if it references back to this phi or if the memory path is dead
              continue;
            }
          }
          if (!is_unsafe_data_reference(n)) {
            saw_safe_input = true; // found safe input
            break;
          }
        }
        if (!saw_safe_input) {
          // There is a dead loop: All inputs are either dead or reference back to this phi
          return top;
        }

        // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
        //     MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
        PhaseIterGVN* igvn = phase->is_IterGVN();
        assert(igvn != nullptr, "sanity check");
        PhiNode* new_base = (PhiNode*) clone();
        // Must eagerly register phis, since they participate in loops.
        igvn->register_new_node_with_optimizer(new_base);

        MergeMemNode* result = MergeMemNode::make(new_base);
        for (uint i = 1; i < req(); ++i) {
          Node *ii = in(i);
          if (ii->is_MergeMem()) {
            MergeMemNode* n = ii->as_MergeMem();
            if (igvn) {
              // TODO revisit this with JDK-8247216
              // Put 'n' on the worklist because it might be modified by MergeMemStream::iteration_setup
              igvn->_worklist.push(n);
            }
            for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
              // If we have not seen this slice yet, make a phi for it.
              bool made_new_phi = false;
              if (mms.is_empty()) {
                Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
                made_new_phi = true;
                igvn->register_new_node_with_optimizer(new_phi);
                mms.set_memory(new_phi);
              }
              Node* phi = mms.memory();
              assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
              phi->set_req(i, mms.memory2());
            }
          }
        }
        // Distribute all self-loops.
        { // (Extra braces to hide mms.)
          for (MergeMemStream mms(result); mms.next_non_empty(); ) {
            Node* phi = mms.memory();
            for (uint i = 1; i < req(); ++i) {
              // ...
            }
          }
        }

        // We could immediately transform the new Phi nodes here, but that can
        // result in creating an excessive number of new nodes within a single
        // IGVN iteration. We have put the Phi nodes on the IGVN worklist, so
        // they are transformed later on in any case.

        // Replace self with the result.
        return result;
      }
    }
    //
    // Other optimizations on the memory chain
    //
    const TypePtr* at = adr_type();
    for( uint i=1; i<req(); ++i ) { // For all paths in
      Node *ii = in(i);
      Node *new_in = MemNode::optimize_memory_chain(ii, at, nullptr, phase);
      if (ii != new_in ) {
        set_req_X(i, new_in, phase->is_IterGVN());
        progress = this;
      }
    }
  }

#ifdef _LP64
  // Push DecodeN/DecodeNKlass down through phi.
  // The rest of the phi graph is transformed by splitting EncodeP nodes up through the phis.
  if ((UseCompressedOops || UseCompressedClassPointers) && can_reshape && progress == nullptr) {
    bool may_push = true;
    bool has_decodeN = false;
    bool is_decodeN = false;
    for (uint i=1; i<req(); ++i) { // For all paths in
      Node *ii = in(i);
      if (ii->is_DecodeNarrowPtr() && ii->bottom_type() == bottom_type()) {
        // Do the optimization only if a non-dead path exists.
        if (ii->in(1)->bottom_type() != Type::TOP) {
          has_decodeN = true;
          is_decodeN = ii->is_DecodeN();
        }

            // ...

            if (is_decodeN) {
              new_ii = new EncodePNode(ii, narrow_t);
            } else {
              new_ii = new EncodePKlassNode(ii, narrow_t);
            }
            igvn->register_new_node_with_optimizer(new_ii);
          }
        }
        new_phi->set_req(i, new_ii);
      }
      igvn->register_new_node_with_optimizer(new_phi, this);
      if (is_decodeN) {
        progress = new DecodeNNode(new_phi, bottom_type());
      } else {
        progress = new DecodeNKlassNode(new_phi, bottom_type());
      }
    }
  }
#endif
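
  // A sketch of the transformation above: Phi(DecodeN(p), DecodeN(q)) becomes
  // DecodeN(Phi(p, q)), leaving a single decode below the merge.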

  Node* inline_type = try_push_inline_types_down(phase, can_reshape);
  if (inline_type != this) {
    return inline_type;
  }

  // Try to convert a Phi whose inputs are duplicated convert nodes into a phi of the pre-conversion
  // type, followed by a single convert node, to de-duplicate the converts and compact the IR.
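  // e.g., Phi(ConvI2L(a), ConvI2L(b)) becomes ConvI2L(Phi(a, b)).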
  if (can_reshape && progress == nullptr) {
    ConvertNode* convert = in(1)->isa_Convert();
    if (convert != nullptr) {
      int conv_op = convert->Opcode();
      bool ok = true;

      // Check the rest of the inputs
      for (uint i = 2; i < req(); i++) {
        // Make sure that all inputs are of the same type of convert node
        if (in(i)->Opcode() != conv_op) {
          ok = false;
          break;
        }
      }

      if (ok) {
        // Find the local bottom type to set as the type of the phi
        const Type* source_type = Type::get_const_basic_type(convert->in_type()->basic_type());
        // ...
        // Set the inputs of the new phi to be the inputs of the converts
        for (uint i = 1; i < req(); i++) {
          newphi->init_req(i, in(i)->in(1));
        }

        phase->is_IterGVN()->register_new_node_with_optimizer(newphi, this);

        return ConvertNode::create_convert(get_convert_type(convert, source_type), get_convert_type(convert, dest_type), newphi);
      }
    }
  }

  // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
  if (EnableVectorReboxing && can_reshape && progress == nullptr && type()->isa_oopptr()) {
    progress = merge_through_phi(this, phase->is_IterGVN());
  }

  return progress; // Return any progress
}

// Check recursively if inputs are either an inline type, constant null
// or another Phi (including self references through data loops). If so,
// push the inline types down through the phis to enable folding of loads.
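// A sketch of the effect: Phi(InlineType, InlineType) becomes a single
// InlineTypeNode whose oop and field inputs are Phis of the corresponding
// inputs, so loads from the buffered oop below the merge can fold away.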
Node* PhiNode::try_push_inline_types_down(PhaseGVN* phase, const bool can_reshape) {
  if (!can_be_inline_type()) {
    return this;
  }

  ciInlineKlass* inline_klass;
  if (can_push_inline_types_down(phase, can_reshape, inline_klass)) {
    assert(inline_klass != nullptr, "must be");
    return push_inline_types_down(phase, can_reshape, inline_klass);
  }
  return this;
}

bool PhiNode::can_push_inline_types_down(PhaseGVN* phase, const bool can_reshape, ciInlineKlass*& inline_klass) {
  if (req() <= 2) {
    // Dead phi.
    return false;
  }
  inline_klass = nullptr;

  // TODO 8302217 We need to prevent endless pushing through
  bool only_phi = (outcnt() != 0);
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* n = fast_out(i);
    if (n->is_InlineType() && n->in(1) == this) {
      return false;
    }
    if (!n->is_Phi()) {
      only_phi = false;
    }
  }
  if (only_phi) {
    return false;
  }

  ResourceMark rm;
  Unique_Node_List worklist;
  worklist.push(this);
  Node_List casts;

  for (uint next = 0; next < worklist.size(); next++) {
    Node* phi = worklist.at(next);
    for (uint i = 1; i < phi->req(); i++) {
      Node* n = phi->in(i);
      if (n == nullptr) {
        return false;
      }
      while (n->is_ConstraintCast()) {
        if (n->in(0) != nullptr && n->in(0)->is_top()) {
          // Will die, don't optimize
          return false;
        }
        casts.push(n);
        n = n->in(1);
      }
      const Type* type = phase->type(n);
      if (n->is_InlineType() && (inline_klass == nullptr || inline_klass == type->inline_klass())) {
        inline_klass = type->inline_klass();
      } else if (n->is_Phi() && can_reshape && n->bottom_type()->isa_ptr()) {
        worklist.push(n);
      } else if (!type->is_zero_type()) {
        return false;
      }
    }
  }
  if (inline_klass == nullptr) {
    return false;
  }

  // Check if cast nodes can be pushed through
  const Type* t = Type::get_const_type(inline_klass);
  while (casts.size() != 0 && t != nullptr) {
    Node* cast = casts.pop();
    if (t->filter(cast->bottom_type()) == Type::TOP) {
      return false;
    }
  }

  return true;
}

#ifdef ASSERT
bool PhiNode::can_push_inline_types_down(PhaseGVN* phase) {
  if (!can_be_inline_type()) {
    return false;
  }

  ciInlineKlass* inline_klass;
  return can_push_inline_types_down(phase, true, inline_klass);
}
#endif // ASSERT

static int compare_types(const Type* const& e1, const Type* const& e2) {
  return (intptr_t)e1 - (intptr_t)e2;
}

// Collect types at casts that are going to be eliminated at that Phi and store them in a TypeTuple.
// Sort the types using an arbitrary order so that a given list of types always hashes to the same TypeTuple
// (TypeTuple pointer comparison is then enough to tell whether two lists of types are the same).
const TypeTuple* PhiNode::collect_types(PhaseGVN* phase) const {
  const Node* region = in(0);
  const Type* phi_type = bottom_type();
  ResourceMark rm;
  GrowableArray<const Type*> types;
  for (uint i = 1; i < req(); i++) {
    if (region->in(i) == nullptr || phase->type(region->in(i)) == Type::TOP) {
      continue;
    }
    Node* in = Node::in(i);
    const Type* t = phase->type(in);
    if (in == nullptr || in == this || t == Type::TOP) {
      continue;
// ...

#ifndef PRODUCT
void CatchProjNode::dump_spec(outputStream *st) const {
  ProjNode::dump_spec(st);
  st->print("@bci %d ",_handler_bci);
}
#endif

//=============================================================================
//------------------------------Identity---------------------------------------
// Check for CreateEx being Identity.
Node* CreateExNode::Identity(PhaseGVN* phase) {
  if( phase->type(in(1)) == Type::TOP ) return in(1);
  if( phase->type(in(0)) == Type::TOP ) return in(0);
  if (phase->type(in(0)->in(0)) == Type::TOP) {
    assert(in(0)->is_CatchProj(), "control is CatchProj");
    return phase->C->top(); // dead code
  }
  // We only come from CatchProj, unless the CatchProj goes away.
  // If the CatchProj is optimized away, then we just carry the
  // exception oop through.

  // CheckCastPPNode::Ideal() for inline types reuses the exception
  // paths of a call to perform an allocation: we can see a Phi here.
  if (in(1)->is_Phi()) {
    return this;
  }
  CallNode *call = in(1)->in(0)->as_Call();

  return (in(0)->is_CatchProj() && in(0)->in(0)->is_Catch() &&
          in(0)->in(0)->in(1) == in(1)) ? this : call->in(TypeFunc::Parms);
}

//=============================================================================
//------------------------------Value------------------------------------------
// Check for being unreachable.
const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
  if (!in(0) || in(0)->is_top()) return Type::TOP;
  return bottom_type();
}

//------------------------------Ideal------------------------------------------
// Check for no longer being part of a loop
Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (can_reshape && !in(0)->is_Region()) {
    // Dead code elimination can sometimes delete this projection so
    // if it's not there, there's nothing to do.