16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/shared/barrierSet.hpp"
27 #include "gc/shared/c2/barrierSetC2.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "memory/resourceArea.hpp"
30 #include "oops/objArrayKlass.hpp"
31 #include "opto/addnode.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/cfgnode.hpp"
34 #include "opto/connode.hpp"
35 #include "opto/convertnode.hpp"
36 #include "opto/loopnode.hpp"
37 #include "opto/machnode.hpp"
38 #include "opto/movenode.hpp"
39 #include "opto/narrowptrnode.hpp"
40 #include "opto/mulnode.hpp"
41 #include "opto/phaseX.hpp"
42 #include "opto/regalloc.hpp"
43 #include "opto/regmask.hpp"
44 #include "opto/runtime.hpp"
45 #include "opto/subnode.hpp"
46 #include "opto/vectornode.hpp"
47 #include "utilities/vmError.hpp"
48
49 // Portions of code courtesy of Clifford Click
50
51 // Optimization - Graph Style
52
53 //=============================================================================
54 //------------------------------Value------------------------------------------
55 // Compute the type of the RegionNode.
501 if (left_path == nullptr || right_path == nullptr) {
502 return false;
503 }
504 Node* diamond_if = left_path->in(0);
505 if (diamond_if == nullptr || !diamond_if->is_If() || diamond_if != right_path->in(0)) {
506 // Not an IfNode merging a diamond or TOP.
507 return false;
508 }
509
510 // Check for a proper bool/cmp
511 const Node* bol = diamond_if->in(1);
512 if (!bol->is_Bool()) {
513 return false;
514 }
515 const Node* cmp = bol->in(1);
516 if (!cmp->is_Cmp()) {
517 return false;
518 }
519 return true;
520 }
521 //------------------------------Ideal------------------------------------------
522 // Return a node which is more "ideal" than the current node. Must preserve
523 // the CFG, but we can still strip out dead paths.
524 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
525 if( !can_reshape && !in(0) ) return nullptr; // Already degraded to a Copy
526 assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
527
528 // Check for RegionNode with no Phi users and both inputs come from either
529 // arm of the same IF. If found, then the control-flow split is useless.
530 bool has_phis = false;
531 if (can_reshape) { // Need DU info to check for Phi users
532 try_clean_mem_phis(phase->is_IterGVN());
533 has_phis = (has_phi() != nullptr); // Cache result
534
535 if (!has_phis) { // No Phi users? Nothing merging?
536 for (uint i = 1; i < req()-1; i++) {
537 Node *if1 = in(i);
538 if( !if1 ) continue;
539 Node *iff = if1->in(0);
540 if( !iff || !iff->is_If() ) continue;
947 if (iff1 == iff2) {
948 igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
949 igvn->replace_input_of(region, idx1, iff1->in(0));
950 igvn->replace_input_of(region, idx2, igvn->C->top());
951 return (region == this); // Remove useless if (both projections map to the same control/value)
952 }
953 BoolNode* bol1 = iff1->in(1)->isa_Bool();
954 BoolNode* bol2 = iff2->in(1)->isa_Bool();
955 if (bol1 == nullptr || bol2 == nullptr) {
956 return false; // No bool inputs found
957 }
958 Node* cmp1 = bol1->in(1);
959 Node* cmp2 = bol2->in(1);
960 bool commute = false;
961 if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
962 return false; // No comparison
963 } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
964 cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
965 cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
966 cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
967 cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck()) {
968 // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
969 // SubTypeCheck is not commutative
970 return false;
971 } else if (cmp1 != cmp2) {
972 if (cmp1->in(1) == cmp2->in(2) &&
973 cmp1->in(2) == cmp2->in(1)) {
974 commute = true; // Same but swapped inputs, commute the test
975 } else {
976 return false; // Ifs are not comparing the same values
977 }
978 }
979 proj1 = proj1->other_if_proj();
980 proj2 = proj2->other_if_proj();
981 if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
982 proj2->unique_ctrl_out_or_null() == this) ||
983 (proj2->unique_ctrl_out_or_null() == iff1 &&
984 proj1->unique_ctrl_out_or_null() == this))) {
985 return false; // Ifs are not connected through other projs
986 }
987 // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged
1026 st->print("#reducible ");
1027 break;
1028 case RegionNode::LoopStatus::NeverIrreducibleEntry:
1029 break; // nothing
1030 }
1031 }
1032 #endif
1033
1034 // Find the one non-null required input. RegionNode only
1035 Node *Node::nonnull_req() const {
1036 assert( is_Region(), "" );
1037 for( uint i = 1; i < _cnt; i++ )
1038 if( in(i) )
1039 return in(i);
1040 ShouldNotReachHere();
1041 return nullptr;
1042 }
1043
1044
1045 //=============================================================================
1046 // note that these functions assume that the _adr_type field is flattened
1047 uint PhiNode::hash() const {
1048 const Type* at = _adr_type;
1049 return TypeNode::hash() + (at ? at->hash() : 0);
1050 }
1051 bool PhiNode::cmp( const Node &n ) const {
1052 return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
1053 }
1054 static inline
1055 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
1056 if (at == nullptr || at == TypePtr::BOTTOM) return at;
1057 return Compile::current()->alias_type(at)->adr_type();
1058 }
1059
1060 //----------------------------make---------------------------------------------
1061 // create a new phi with edges matching r and set (initially) to x
1062 PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
1063 uint preds = r->req(); // Number of predecessor paths
1064 assert(t != Type::MEMORY || at == flatten_phi_adr_type(at), "flatten at");
1065 PhiNode* p = new PhiNode(r, t, at);
1066 for (uint j = 1; j < preds; j++) {
1067 // Fill in all inputs, except those which the region does not yet have
1068 if (r->in(j) != nullptr)
1069 p->init_req(j, x);
1070 }
1071 return p;
1072 }
1073 PhiNode* PhiNode::make(Node* r, Node* x) {
1074 const Type* t = x->bottom_type();
1075 const TypePtr* at = nullptr;
1076 if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
1077 return make(r, x, t, at);
1078 }
1079 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
1080 const Type* t = x->bottom_type();
1081 const TypePtr* at = nullptr;
1082 if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
1083 return new PhiNode(r, t, at);
1084 }
1172 np->as_Phi()->verify_adr_type(visited, at);
1173 } else if (n->bottom_type() == Type::TOP
1174 || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
1175 // ignore top inputs
1176 } else {
1177 const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
1178 // recheck phi/non-phi consistency at leaves:
1179 assert((nat != nullptr) == (at != nullptr), "");
1180 assert(nat == at || nat == TypePtr::BOTTOM,
1181 "adr_type must be consistent at leaves of phi nest");
1182 }
1183 }
1184 }
1185
// Verify a whole nest of phis rooted at this one.
// Debug-only consistency check: a Phi carries an _adr_type exactly when it
// is a memory phi, and that _adr_type must already be in flattened
// (canonical alias-class) form.
void PhiNode::verify_adr_type(bool recursive) const {
  if (VMError::is_error_reported()) return; // muzzle asserts when debugging an error
  if (Node::in_dump()) return; // muzzle asserts when printing

  // Memory phis, and only memory phis, track an address type.
  assert((_type == Type::MEMORY) == (_adr_type != nullptr), "adr_type for memory phis only");

  if (!VerifyAliases) return; // verify thoroughly only if requested

  // The slice was normalized at construction; it must still be canonical.
  assert(_adr_type == flatten_phi_adr_type(_adr_type),
  "Phi::adr_type must be pre-normalized");

  if (recursive) {
    // Walk the entire phi nest; the visited set breaks data cycles.
    VectorSet visited;
    verify_adr_type(visited, _adr_type);
  }
}
1203 #endif
1204
1205
1206 //------------------------------Value------------------------------------------
1207 // Compute the type of the PhiNode
1208 const Type* PhiNode::Value(PhaseGVN* phase) const {
1209 Node *r = in(0); // RegionNode
1210 if( !r ) // Copy or dead
1211 return in(1) ? phase->type(in(1)) : Type::TOP;
1390 assert(is_diamond_phi() > 0, "sanity");
1391 assert(req() == 3, "same as region");
1392 const Node* region = in(0);
1393 for (uint i = 1; i < 3; i++) {
1394 Node* phi_input = in(i);
1395 if (phi_input != nullptr && phi_input->is_MergeMem() && region->in(i)->outcnt() == 1) {
1396 // Nothing is control-dependent on path #i except the region itself.
1397 MergeMemNode* merge_mem = phi_input->as_MergeMem();
1398 uint j = 3 - i;
1399 Node* other_phi_input = in(j);
1400 if (other_phi_input != nullptr && other_phi_input == merge_mem->base_memory()) {
1401 // merge_mem is a successor memory to other_phi_input, and is not pinned inside the diamond, so push it out.
1402 // This will allow the diamond to collapse completely if there are no other phis left.
1403 igvn->replace_node(this, merge_mem);
1404 return true;
1405 }
1406 }
1407 }
1408 return false;
1409 }
1410 //----------------------------check_cmove_id-----------------------------------
1411 // Check for CMove'ing a constant after comparing against the constant.
1412 // Happens all the time now, since if we compare equality vs a constant in
1413 // the parser, we "know" the variable is constant on one path and we force
1414 // it. Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
1415 // conditional move: "x = (x==0)?0:x;". Yucko. This fix is slightly more
1416 // general in that we don't need constants. Since CMove's are only inserted
1417 // in very special circumstances, we do it here on generic Phi's.
1418 Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) {
1419 assert(true_path !=0, "only diamond shape graph expected");
1420
1421 // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1422 // phi->region->if_proj->ifnode->bool->cmp
1423 Node* region = in(0);
1424 Node* iff = region->in(1)->in(0);
1425 BoolNode* b = iff->in(1)->as_Bool();
1426 Node* cmp = b->in(1);
1427 Node* tval = in(true_path);
1428 Node* fval = in(3-true_path);
1429 Node* id = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b);
1999
2000 if (rc->in(0)->in(1) == nullptr || !rc->in(0)->in(1)->is_Bool()) { continue; }
2001 if (worklist.member(rc->in(0)->in(1))) {
2002 delay = true;
2003 break;
2004 }
2005
2006 if (rc->in(0)->in(1)->in(1) == nullptr || !rc->in(0)->in(1)->in(1)->is_Cmp()) { continue; }
2007 if (worklist.member(rc->in(0)->in(1)->in(1))) {
2008 delay = true;
2009 break;
2010 }
2011 }
2012
2013 if (delay) {
2014 worklist.push(this);
2015 }
2016 return delay;
2017 }
2018
2019 // If the Phi's Region is in an irreducible loop, and the Region
2020 // has had an input removed, but not yet transformed, it could be
2021 // that the Region (and this Phi) are not reachable from Root.
2022 // If we allow the Phi to collapse before the Region, this may lead
2023 // to dead-loop data. Wait for the Region to check for reachability,
2024 // and potentially remove the dead code.
2025 bool PhiNode::must_wait_for_region_in_irreducible_loop(PhaseGVN* phase) const {
2026 RegionNode* region = in(0)->as_Region();
2027 if (region->loop_status() == RegionNode::LoopStatus::MaybeIrreducibleEntry) {
2028 Node* top = phase->C->top();
2029 for (uint j = 1; j < req(); j++) {
2030 Node* rc = region->in(j); // for each control input
2031 if (rc == nullptr || phase->type(rc) == Type::TOP) {
2032 // Region is missing a control input
2033 Node* n = in(j);
2034 if (n != nullptr && n != top) {
2035 // Phi still has its input, so region just lost its input
2036 return true;
2037 }
2038 }
2330 for (uint i = 1; i < req(); i++) {
2331 offset->init_req(i, in(i)->in(AddPNode::Offset));
2332 }
2333 phase->is_IterGVN()->register_new_node_with_optimizer(offset);
2334 }
2335 return new AddPNode(base, address, offset);
2336 }
2337 }
2338 }
2339
2340 // Split phis through memory merges, so that the memory merges will go away.
2341 // Piggy-back this transformation on the search for a unique input....
2342 // It will be as if the merged memory is the unique value of the phi.
2343 // (Do not attempt this optimization unless parsing is complete.
2344 // It would make the parser's memory-merge logic sick.)
2345 // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
2346 if (progress == nullptr && can_reshape && type() == Type::MEMORY) {
2347 // see if this phi should be sliced
2348 uint merge_width = 0;
2349 bool saw_self = false;
2350 for( uint i=1; i<req(); ++i ) {// For all paths in
2351 Node *ii = in(i);
2352 // TOP inputs should not be counted as safe inputs because if the
2353 // Phi references itself through all other inputs then splitting the
2354 // Phi through memory merges would create dead loop at later stage.
2355 if (ii == top) {
2356 return nullptr; // Delay optimization until graph is cleaned.
2357 }
2358 if (ii->is_MergeMem()) {
2359 MergeMemNode* n = ii->as_MergeMem();
2360 merge_width = MAX2(merge_width, n->req());
2361 saw_self = saw_self || (n->base_memory() == this);
2362 }
2363 }
2364
2365 // This restriction is temporarily necessary to ensure termination:
2366 if (!saw_self && adr_type() == TypePtr::BOTTOM) merge_width = 0;
2367
2368 if (merge_width > Compile::AliasIdxRaw) {
2369 // found at least one non-empty MergeMem
2370 const TypePtr* at = adr_type();
2371 if (at != TypePtr::BOTTOM) {
2372 // Patch the existing phi to select an input from the merge:
2373 // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2374 // Phi:AT1(...m1...)
2375 int alias_idx = phase->C->get_alias_index(at);
2376 for (uint i=1; i<req(); ++i) {
2377 Node *ii = in(i);
2378 if (ii->is_MergeMem()) {
2379 MergeMemNode* n = ii->as_MergeMem();
2380 // compress paths and change unreachable cycles to TOP
2381 // If not, we can update the input infinitely along a MergeMem cycle
2382 // Equivalent code is in MemNode::Ideal_common
2383 Node *m = phase->transform(n);
2384 if (outcnt() == 0) { // Above transform() may kill us!
2385 return top;
2386 }
2417 if (!saw_safe_input) {
2418 // There is a dead loop: All inputs are either dead or reference back to this phi
2419 return top;
2420 }
2421
2422 // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
2423 // MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
2424 PhaseIterGVN* igvn = phase->is_IterGVN();
2425 assert(igvn != nullptr, "sanity check");
2426 Node* hook = new Node(1);
2427 PhiNode* new_base = (PhiNode*) clone();
2428 // Must eagerly register phis, since they participate in loops.
2429 igvn->register_new_node_with_optimizer(new_base);
2430 hook->add_req(new_base);
2431
2432 MergeMemNode* result = MergeMemNode::make(new_base);
2433 for (uint i = 1; i < req(); ++i) {
2434 Node *ii = in(i);
2435 if (ii->is_MergeMem()) {
2436 MergeMemNode* n = ii->as_MergeMem();
2437 for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
2438 // If we have not seen this slice yet, make a phi for it.
2439 bool made_new_phi = false;
2440 if (mms.is_empty()) {
2441 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
2442 made_new_phi = true;
2443 igvn->register_new_node_with_optimizer(new_phi);
2444 hook->add_req(new_phi);
2445 mms.set_memory(new_phi);
2446 }
2447 Node* phi = mms.memory();
2448 assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
2449 phi->set_req(i, mms.memory2());
2450 }
2451 }
2452 }
2453 // Distribute all self-loops.
2454 { // (Extra braces to hide mms.)
2455 for (MergeMemStream mms(result); mms.next_non_empty(); ) {
2456 Node* phi = mms.memory();
2535 if (is_decodeN) {
2536 new_ii = new EncodePNode(ii, narrow_t);
2537 } else {
2538 new_ii = new EncodePKlassNode(ii, narrow_t);
2539 }
2540 igvn->register_new_node_with_optimizer(new_ii);
2541 }
2542 }
2543 new_phi->set_req(i, new_ii);
2544 }
2545 igvn->register_new_node_with_optimizer(new_phi, this);
2546 if (is_decodeN) {
2547 progress = new DecodeNNode(new_phi, bottom_type());
2548 } else {
2549 progress = new DecodeNKlassNode(new_phi, bottom_type());
2550 }
2551 }
2552 }
2553 #endif
2554
2555 // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
2556 if (EnableVectorReboxing && can_reshape && progress == nullptr && type()->isa_oopptr()) {
2557 progress = merge_through_phi(this, phase->is_IterGVN());
2558 }
2559
2560 return progress; // Return any progress
2561 }
2562
2563 Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn) {
2564 Node_Stack stack(1);
2565 VectorSet visited;
2566 Node_List node_map;
2567
2568 stack.push(root_phi, 1); // ignore control
2569 visited.set(root_phi->_idx);
2570
2571 Node* new_phi = new PhiNode(root_phi->in(0), t);
2572 node_map.map(root_phi->_idx, new_phi);
2573
2574 while (stack.is_nonempty()) {
2879 #ifndef PRODUCT
// Debug printing: append the exception handler's bytecode index to the
// standard projection dump.
void CatchProjNode::dump_spec(outputStream *st) const {
  ProjNode::dump_spec(st);
  st->print("@bci %d ",_handler_bci);
}
2884 #endif
2885
2886 //=============================================================================
2887 //------------------------------Identity---------------------------------------
2888 // Check for CreateEx being Identity.
Node* CreateExNode::Identity(PhaseGVN* phase) {
  // Dead-input propagation: collapse to whichever input (exception oop at
  // in(1), control at in(0)) is already TOP.
  if( phase->type(in(1)) == Type::TOP ) return in(1);
  if( phase->type(in(0)) == Type::TOP ) return in(0);
  // If the control's own control is dead, this node is unreachable.
  if (phase->type(in(0)->in(0)) == Type::TOP) {
    assert(in(0)->is_CatchProj(), "control is CatchProj");
    return phase->C->top(); // dead code
  }
  // We only come from CatchProj, unless the CatchProj goes away.
  // If the CatchProj is optimized away, then we just carry the
  // exception oop through.
  CallNode *call = in(1)->in(0)->as_Call();

  // Keep this node while the Catch/CatchProj shape over the call is still
  // intact; otherwise forward the call's value at TypeFunc::Parms (the
  // exception oop, per the comment above).
  return (in(0)->is_CatchProj() && in(0)->in(0)->is_Catch() &&
  in(0)->in(0)->in(1) == in(1)) ? this : call->in(TypeFunc::Parms);
}
2904
2905 //=============================================================================
2906 //------------------------------Value------------------------------------------
2907 // Check for being unreachable.
2908 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
2909 if (!in(0) || in(0)->is_top()) return Type::TOP;
2910 return bottom_type();
2911 }
2912
2913 //------------------------------Ideal------------------------------------------
2914 // Check for no longer being part of a loop
2915 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2916 if (can_reshape && !in(0)->is_Region()) {
2917 // Dead code elimination can sometimes delete this projection so
2918 // if it's not there, there's nothing to do.
|
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/shared/barrierSet.hpp"
27 #include "gc/shared/c2/barrierSetC2.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "memory/resourceArea.hpp"
30 #include "oops/objArrayKlass.hpp"
31 #include "opto/addnode.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/cfgnode.hpp"
34 #include "opto/connode.hpp"
35 #include "opto/convertnode.hpp"
36 #include "opto/inlinetypenode.hpp"
37 #include "opto/loopnode.hpp"
38 #include "opto/machnode.hpp"
39 #include "opto/movenode.hpp"
40 #include "opto/narrowptrnode.hpp"
41 #include "opto/mulnode.hpp"
42 #include "opto/phaseX.hpp"
43 #include "opto/regalloc.hpp"
44 #include "opto/regmask.hpp"
45 #include "opto/runtime.hpp"
46 #include "opto/subnode.hpp"
47 #include "opto/vectornode.hpp"
48 #include "utilities/vmError.hpp"
49
50 // Portions of code courtesy of Clifford Click
51
52 // Optimization - Graph Style
53
54 //=============================================================================
55 //------------------------------Value------------------------------------------
56 // Compute the type of the RegionNode.
502 if (left_path == nullptr || right_path == nullptr) {
503 return false;
504 }
505 Node* diamond_if = left_path->in(0);
506 if (diamond_if == nullptr || !diamond_if->is_If() || diamond_if != right_path->in(0)) {
507 // Not an IfNode merging a diamond or TOP.
508 return false;
509 }
510
511 // Check for a proper bool/cmp
512 const Node* bol = diamond_if->in(1);
513 if (!bol->is_Bool()) {
514 return false;
515 }
516 const Node* cmp = bol->in(1);
517 if (!cmp->is_Cmp()) {
518 return false;
519 }
520 return true;
521 }
522
523 //------------------------------Ideal------------------------------------------
524 // Return a node which is more "ideal" than the current node. Must preserve
525 // the CFG, but we can still strip out dead paths.
526 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
527 if( !can_reshape && !in(0) ) return nullptr; // Already degraded to a Copy
528 assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
529
530 // Check for RegionNode with no Phi users and both inputs come from either
531 // arm of the same IF. If found, then the control-flow split is useless.
532 bool has_phis = false;
533 if (can_reshape) { // Need DU info to check for Phi users
534 try_clean_mem_phis(phase->is_IterGVN());
535 has_phis = (has_phi() != nullptr); // Cache result
536
537 if (!has_phis) { // No Phi users? Nothing merging?
538 for (uint i = 1; i < req()-1; i++) {
539 Node *if1 = in(i);
540 if( !if1 ) continue;
541 Node *iff = if1->in(0);
542 if( !iff || !iff->is_If() ) continue;
949 if (iff1 == iff2) {
950 igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
951 igvn->replace_input_of(region, idx1, iff1->in(0));
952 igvn->replace_input_of(region, idx2, igvn->C->top());
953 return (region == this); // Remove useless if (both projections map to the same control/value)
954 }
955 BoolNode* bol1 = iff1->in(1)->isa_Bool();
956 BoolNode* bol2 = iff2->in(1)->isa_Bool();
957 if (bol1 == nullptr || bol2 == nullptr) {
958 return false; // No bool inputs found
959 }
960 Node* cmp1 = bol1->in(1);
961 Node* cmp2 = bol2->in(1);
962 bool commute = false;
963 if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
964 return false; // No comparison
965 } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
966 cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
967 cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
968 cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
969 cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck() ||
970 cmp1->is_FlatArrayCheck() || cmp2->is_FlatArrayCheck()) {
971 // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
972 // SubTypeCheck is not commutative
973 return false;
974 } else if (cmp1 != cmp2) {
975 if (cmp1->in(1) == cmp2->in(2) &&
976 cmp1->in(2) == cmp2->in(1)) {
977 commute = true; // Same but swapped inputs, commute the test
978 } else {
979 return false; // Ifs are not comparing the same values
980 }
981 }
982 proj1 = proj1->other_if_proj();
983 proj2 = proj2->other_if_proj();
984 if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
985 proj2->unique_ctrl_out_or_null() == this) ||
986 (proj2->unique_ctrl_out_or_null() == iff1 &&
987 proj1->unique_ctrl_out_or_null() == this))) {
988 return false; // Ifs are not connected through other projs
989 }
990 // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged
1029 st->print("#reducible ");
1030 break;
1031 case RegionNode::LoopStatus::NeverIrreducibleEntry:
1032 break; // nothing
1033 }
1034 }
1035 #endif
1036
1037 // Find the one non-null required input. RegionNode only
1038 Node *Node::nonnull_req() const {
1039 assert( is_Region(), "" );
1040 for( uint i = 1; i < _cnt; i++ )
1041 if( in(i) )
1042 return in(i);
1043 ShouldNotReachHere();
1044 return nullptr;
1045 }
1046
1047
1048 //=============================================================================
1049 // note that these functions assume that the _adr_type field is flat
1050 uint PhiNode::hash() const {
1051 const Type* at = _adr_type;
1052 return TypeNode::hash() + (at ? at->hash() : 0);
1053 }
1054 bool PhiNode::cmp( const Node &n ) const {
1055 return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
1056 }
1057 static inline
1058 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
1059 if (at == nullptr || at == TypePtr::BOTTOM) return at;
1060 return Compile::current()->alias_type(at)->adr_type();
1061 }
1062
1063 //----------------------------make---------------------------------------------
1064 // create a new phi with edges matching r and set (initially) to x
// create a new phi with edges matching r and set (initially) to x
PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
  uint preds = r->req(); // Number of predecessor paths
  // Memory phis must carry a flattened (canonical) slice. One exception:
  // while flat-array accesses still share a single alias class
  // (flat_accesses_share_alias), the shared TypeAryPtr::INLINES slice is
  // accepted as-is.
  assert(t != Type::MEMORY || at == flatten_phi_adr_type(at) || (flatten_phi_adr_type(at) == TypeAryPtr::INLINES && Compile::current()->flat_accesses_share_alias()), "flatten at");
  PhiNode* p = new PhiNode(r, t, at);
  for (uint j = 1; j < preds; j++) {
    // Fill in all inputs, except those which the region does not yet have
    if (r->in(j) != nullptr)
      p->init_req(j, x);
  }
  return p;
}
1076 PhiNode* PhiNode::make(Node* r, Node* x) {
1077 const Type* t = x->bottom_type();
1078 const TypePtr* at = nullptr;
1079 if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
1080 return make(r, x, t, at);
1081 }
1082 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
1083 const Type* t = x->bottom_type();
1084 const TypePtr* at = nullptr;
1085 if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
1086 return new PhiNode(r, t, at);
1087 }
1175 np->as_Phi()->verify_adr_type(visited, at);
1176 } else if (n->bottom_type() == Type::TOP
1177 || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
1178 // ignore top inputs
1179 } else {
1180 const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
1181 // recheck phi/non-phi consistency at leaves:
1182 assert((nat != nullptr) == (at != nullptr), "");
1183 assert(nat == at || nat == TypePtr::BOTTOM,
1184 "adr_type must be consistent at leaves of phi nest");
1185 }
1186 }
1187 }
1188
// Verify a whole nest of phis rooted at this one.
// Debug-only consistency check of _adr_type invariants, including the
// Valhalla rule that flat-array elements share one slice while
// flat_accesses_share_alias is set.
void PhiNode::verify_adr_type(bool recursive) const {
  if (VMError::is_error_reported()) return; // muzzle asserts when debugging an error
  if (Node::in_dump()) return; // muzzle asserts when printing

  // Memory phis, and only memory phis, carry an address type.
  assert((_type == Type::MEMORY) == (_adr_type != nullptr), "adr_type for memory phis only");
  // Flat array element shouldn't get their own memory slice until flat_accesses_share_alias is cleared.
  // It could be the graph has no loads/stores and flat_accesses_share_alias is never cleared. EA could still
  // creates per element Phis but that wouldn't be a problem as there are no memory accesses for that array.
  assert(_adr_type == nullptr || _adr_type->isa_aryptr() == nullptr ||
  _adr_type->is_aryptr()->is_known_instance() ||
  !_adr_type->is_aryptr()->is_flat() ||
  !Compile::current()->flat_accesses_share_alias() ||
  _adr_type == TypeAryPtr::INLINES, "flat array element shouldn't get its own slice yet");

  if (!VerifyAliases) return; // verify thoroughly only if requested

  // The slice must already be canonical (see flatten_phi_adr_type).
  assert(_adr_type == flatten_phi_adr_type(_adr_type),
  "Phi::adr_type must be pre-normalized");

  if (recursive) {
    // Walk the whole phi nest; the visited set breaks cycles.
    VectorSet visited;
    verify_adr_type(visited, _adr_type);
  }
}
1214 #endif
1215
1216
1217 //------------------------------Value------------------------------------------
1218 // Compute the type of the PhiNode
1219 const Type* PhiNode::Value(PhaseGVN* phase) const {
1220 Node *r = in(0); // RegionNode
1221 if( !r ) // Copy or dead
1222 return in(1) ? phase->type(in(1)) : Type::TOP;
1401 assert(is_diamond_phi() > 0, "sanity");
1402 assert(req() == 3, "same as region");
1403 const Node* region = in(0);
1404 for (uint i = 1; i < 3; i++) {
1405 Node* phi_input = in(i);
1406 if (phi_input != nullptr && phi_input->is_MergeMem() && region->in(i)->outcnt() == 1) {
1407 // Nothing is control-dependent on path #i except the region itself.
1408 MergeMemNode* merge_mem = phi_input->as_MergeMem();
1409 uint j = 3 - i;
1410 Node* other_phi_input = in(j);
1411 if (other_phi_input != nullptr && other_phi_input == merge_mem->base_memory()) {
1412 // merge_mem is a successor memory to other_phi_input, and is not pinned inside the diamond, so push it out.
1413 // This will allow the diamond to collapse completely if there are no other phis left.
1414 igvn->replace_node(this, merge_mem);
1415 return true;
1416 }
1417 }
1418 }
1419 return false;
1420 }
1421
1422 //----------------------------check_cmove_id-----------------------------------
1423 // Check for CMove'ing a constant after comparing against the constant.
1424 // Happens all the time now, since if we compare equality vs a constant in
1425 // the parser, we "know" the variable is constant on one path and we force
1426 // it. Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
1427 // conditional move: "x = (x==0)?0:x;". Yucko. This fix is slightly more
1428 // general in that we don't need constants. Since CMove's are only inserted
1429 // in very special circumstances, we do it here on generic Phi's.
1430 Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) {
1431 assert(true_path !=0, "only diamond shape graph expected");
1432
1433 // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1434 // phi->region->if_proj->ifnode->bool->cmp
1435 Node* region = in(0);
1436 Node* iff = region->in(1)->in(0);
1437 BoolNode* b = iff->in(1)->as_Bool();
1438 Node* cmp = b->in(1);
1439 Node* tval = in(true_path);
1440 Node* fval = in(3-true_path);
1441 Node* id = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b);
2011
2012 if (rc->in(0)->in(1) == nullptr || !rc->in(0)->in(1)->is_Bool()) { continue; }
2013 if (worklist.member(rc->in(0)->in(1))) {
2014 delay = true;
2015 break;
2016 }
2017
2018 if (rc->in(0)->in(1)->in(1) == nullptr || !rc->in(0)->in(1)->in(1)->is_Cmp()) { continue; }
2019 if (worklist.member(rc->in(0)->in(1)->in(1))) {
2020 delay = true;
2021 break;
2022 }
2023 }
2024
2025 if (delay) {
2026 worklist.push(this);
2027 }
2028 return delay;
2029 }
2030
// Push inline type input nodes (and null) down through the phi recursively (can handle data loops).
// Builds an InlineTypeNode whose per-field values are themselves phis over
// this phi's region, then merges each of this phi's inputs into it.
// Returns the new (untransformed) InlineTypeNode that replaces this phi.
InlineTypeNode* PhiNode::push_inline_types_through(PhaseGVN* phase, bool can_reshape, ciInlineKlass* vk) {
  // Start from an all-null inline type of class vk and clone per-field phis
  // over our region; !_type->maybe_null() — presumably marks the result as
  // known non-null when this phi's type excludes null (TODO confirm
  // clone_with_phis parameter semantics).
  InlineTypeNode* vt = InlineTypeNode::make_null(*phase, vk)->clone_with_phis(phase, in(0), !_type->maybe_null());
  if (can_reshape) {
    // Replace phi right away to be able to use the inline
    // type node when reaching the phi again through data loops.
    PhaseIterGVN* igvn = phase->is_IterGVN();
    for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
      Node* u = fast_out(i);
      igvn->rehash_node_delayed(u);
      // replace_edge may remove several out-edges; shrink the iteration
      // bound and re-check the current index.
      imax -= u->replace_edge(this, vt);
      --i;
    }
    igvn->rehash_node_delayed(this);
    assert(outcnt() == 0, "should be dead now");
  }
  ResourceMark rm;
  Node_List casts;  // ConstraintCasts peeled off the current input, innermost last
  for (uint i = 1; i < req(); ++i) {
    Node* n = in(i);
    // Strip any chain of ConstraintCasts, remembering them so they can be
    // re-applied to the oop inside the InlineTypeNode below.
    while (n->is_ConstraintCast()) {
      casts.push(n);
      n = n->in(1);
    }
    if (phase->type(n)->is_zero_type()) {
      // Null input becomes an explicit all-null inline type.
      n = InlineTypeNode::make_null(*phase, vk);
    } else if (n->is_Phi()) {
      // Recurse through nested phis (data loops are handled by the eager
      // replacement above).
      assert(can_reshape, "can only handle phis during IGVN");
      n = phase->transform(n->as_Phi()->push_inline_types_through(phase, can_reshape, vk));
    }
    while (casts.size() != 0) {
      // Push the cast(s) through the InlineTypeNode
      Node* cast = casts.pop()->clone();
      cast->set_req_X(1, n->as_InlineType()->get_oop(), phase);
      n = n->clone();
      n->as_InlineType()->set_oop(phase->transform(cast));
      n = phase->transform(n);
    }
    bool transform = !can_reshape && (i == (req()-1)); // Transform phis on last merge
    vt->merge_with(phase, n->as_InlineType(), i, transform);
  }
  return vt;
}
2074
2075 // If the Phi's Region is in an irreducible loop, and the Region
2076 // has had an input removed, but not yet transformed, it could be
2077 // that the Region (and this Phi) are not reachable from Root.
2078 // If we allow the Phi to collapse before the Region, this may lead
2079 // to dead-loop data. Wait for the Region to check for reachability,
2080 // and potentially remove the dead code.
2081 bool PhiNode::must_wait_for_region_in_irreducible_loop(PhaseGVN* phase) const {
2082 RegionNode* region = in(0)->as_Region();
2083 if (region->loop_status() == RegionNode::LoopStatus::MaybeIrreducibleEntry) {
2084 Node* top = phase->C->top();
2085 for (uint j = 1; j < req(); j++) {
2086 Node* rc = region->in(j); // for each control input
2087 if (rc == nullptr || phase->type(rc) == Type::TOP) {
2088 // Region is missing a control input
2089 Node* n = in(j);
2090 if (n != nullptr && n != top) {
2091 // Phi still has its input, so region just lost its input
2092 return true;
2093 }
2094 }
2386 for (uint i = 1; i < req(); i++) {
2387 offset->init_req(i, in(i)->in(AddPNode::Offset));
2388 }
2389 phase->is_IterGVN()->register_new_node_with_optimizer(offset);
2390 }
2391 return new AddPNode(base, address, offset);
2392 }
2393 }
2394 }
2395
2396 // Split phis through memory merges, so that the memory merges will go away.
2397 // Piggy-back this transformation on the search for a unique input....
2398 // It will be as if the merged memory is the unique value of the phi.
2399 // (Do not attempt this optimization unless parsing is complete.
2400 // It would make the parser's memory-merge logic sick.)
2401 // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
2402 if (progress == nullptr && can_reshape && type() == Type::MEMORY) {
2403 // see if this phi should be sliced
2404 uint merge_width = 0;
2405 bool saw_self = false;
2406 // TODO revisit this with JDK-8247216
2407 bool mergemem_only = true;
2408 for( uint i=1; i<req(); ++i ) {// For all paths in
2409 Node *ii = in(i);
2410 // TOP inputs should not be counted as safe inputs because if the
2411 // Phi references itself through all other inputs then splitting the
2412 // Phi through memory merges would create dead loop at later stage.
2413 if (ii == top) {
2414 return nullptr; // Delay optimization until graph is cleaned.
2415 }
2416 if (ii->is_MergeMem()) {
2417 MergeMemNode* n = ii->as_MergeMem();
2418 merge_width = MAX2(merge_width, n->req());
2419 saw_self = saw_self || (n->base_memory() == this);
2420 } else {
2421 mergemem_only = false;
2422 }
2423 }
2424
2425 // This restriction is temporarily necessary to ensure termination:
2426 if (!mergemem_only && !saw_self && adr_type() == TypePtr::BOTTOM) merge_width = 0;
2427
2428 if (merge_width > Compile::AliasIdxRaw) {
2429 // found at least one non-empty MergeMem
2430 const TypePtr* at = adr_type();
2431 if (at != TypePtr::BOTTOM) {
2432 // Patch the existing phi to select an input from the merge:
2433 // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2434 // Phi:AT1(...m1...)
2435 int alias_idx = phase->C->get_alias_index(at);
2436 for (uint i=1; i<req(); ++i) {
2437 Node *ii = in(i);
2438 if (ii->is_MergeMem()) {
2439 MergeMemNode* n = ii->as_MergeMem();
2440 // compress paths and change unreachable cycles to TOP
2441 // If not, we can update the input infinitely along a MergeMem cycle
2442 // Equivalent code is in MemNode::Ideal_common
2443 Node *m = phase->transform(n);
2444 if (outcnt() == 0) { // Above transform() may kill us!
2445 return top;
2446 }
2477 if (!saw_safe_input) {
2478 // There is a dead loop: All inputs are either dead or reference back to this phi
2479 return top;
2480 }
2481
2482 // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
2483 // MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
2484 PhaseIterGVN* igvn = phase->is_IterGVN();
2485 assert(igvn != nullptr, "sanity check");
2486 Node* hook = new Node(1);
2487 PhiNode* new_base = (PhiNode*) clone();
2488 // Must eagerly register phis, since they participate in loops.
2489 igvn->register_new_node_with_optimizer(new_base);
2490 hook->add_req(new_base);
2491
2492 MergeMemNode* result = MergeMemNode::make(new_base);
2493 for (uint i = 1; i < req(); ++i) {
2494 Node *ii = in(i);
2495 if (ii->is_MergeMem()) {
2496 MergeMemNode* n = ii->as_MergeMem();
2497 if (igvn) {
2498 // TODO revisit this with JDK-8247216
2499 // Put 'n' on the worklist because it might be modified by MergeMemStream::iteration_setup
2500 igvn->_worklist.push(n);
2501 }
2502 for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
2503 // If we have not seen this slice yet, make a phi for it.
2504 bool made_new_phi = false;
2505 if (mms.is_empty()) {
2506 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
2507 made_new_phi = true;
2508 igvn->register_new_node_with_optimizer(new_phi);
2509 hook->add_req(new_phi);
2510 mms.set_memory(new_phi);
2511 }
2512 Node* phi = mms.memory();
2513 assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
2514 phi->set_req(i, mms.memory2());
2515 }
2516 }
2517 }
2518 // Distribute all self-loops.
2519 { // (Extra braces to hide mms.)
2520 for (MergeMemStream mms(result); mms.next_non_empty(); ) {
2521 Node* phi = mms.memory();
2600 if (is_decodeN) {
2601 new_ii = new EncodePNode(ii, narrow_t);
2602 } else {
2603 new_ii = new EncodePKlassNode(ii, narrow_t);
2604 }
2605 igvn->register_new_node_with_optimizer(new_ii);
2606 }
2607 }
2608 new_phi->set_req(i, new_ii);
2609 }
2610 igvn->register_new_node_with_optimizer(new_phi, this);
2611 if (is_decodeN) {
2612 progress = new DecodeNNode(new_phi, bottom_type());
2613 } else {
2614 progress = new DecodeNKlassNode(new_phi, bottom_type());
2615 }
2616 }
2617 }
2618 #endif
2619
2620 // Check recursively if inputs are either an inline type, constant null
2621 // or another Phi (including self references through data loops). If so,
2622 // push the inline types down through the phis to enable folding of loads.
2623 if (EnableValhalla && _type->isa_ptr() && req() > 2) {
2624 ResourceMark rm;
2625 Unique_Node_List worklist;
2626 worklist.push(this);
2627 bool can_optimize = true;
2628 ciInlineKlass* vk = nullptr;
2629 Node_List casts;
2630
2631 // TODO 8302217 We need to prevent endless pushing through
2632 bool only_phi = (outcnt() != 0);
2633 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
2634 Node* n = fast_out(i);
2635 if (n->is_InlineType() && n->in(1) == this) {
2636 can_optimize = false;
2637 break;
2638 }
2639 if (!n->is_Phi()) {
2640 only_phi = false;
2641 }
2642 }
2643 if (only_phi) {
2644 can_optimize = false;
2645 }
2646 for (uint next = 0; next < worklist.size() && can_optimize; next++) {
2647 Node* phi = worklist.at(next);
2648 for (uint i = 1; i < phi->req() && can_optimize; i++) {
2649 Node* n = phi->in(i);
2650 if (n == nullptr) {
2651 can_optimize = false;
2652 break;
2653 }
2654 while (n->is_ConstraintCast()) {
2655 if (n->in(0) != nullptr && n->in(0)->is_top()) {
2656 // Will die, don't optimize
2657 can_optimize = false;
2658 break;
2659 }
2660 casts.push(n);
2661 n = n->in(1);
2662 }
2663 const Type* t = phase->type(n);
2664 if (n->is_InlineType() && (vk == nullptr || vk == t->inline_klass())) {
2665 vk = (vk == nullptr) ? t->inline_klass() : vk;
2666 } else if (n->is_Phi() && can_reshape && n->bottom_type()->isa_ptr()) {
2667 worklist.push(n);
2668 } else if (!t->is_zero_type()) {
2669 can_optimize = false;
2670 }
2671 }
2672 }
2673 // Check if cast nodes can be pushed through
2674 const Type* t = Type::get_const_type(vk);
2675 while (casts.size() != 0 && can_optimize && t != nullptr) {
2676 Node* cast = casts.pop();
2677 if (t->filter(cast->bottom_type()) == Type::TOP) {
2678 can_optimize = false;
2679 }
2680 }
2681 if (can_optimize && vk != nullptr) {
2682 return push_inline_types_through(phase, can_reshape, vk);
2683 }
2684 }
2685
2686 // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
2687 if (EnableVectorReboxing && can_reshape && progress == nullptr && type()->isa_oopptr()) {
2688 progress = merge_through_phi(this, phase->is_IterGVN());
2689 }
2690
2691 return progress; // Return any progress
2692 }
2693
2694 Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn) {
2695 Node_Stack stack(1);
2696 VectorSet visited;
2697 Node_List node_map;
2698
2699 stack.push(root_phi, 1); // ignore control
2700 visited.set(root_phi->_idx);
2701
2702 Node* new_phi = new PhiNode(root_phi->in(0), t);
2703 node_map.map(root_phi->_idx, new_phi);
2704
2705 while (stack.is_nonempty()) {
3010 #ifndef PRODUCT
3011 void CatchProjNode::dump_spec(outputStream *st) const {
3012 ProjNode::dump_spec(st);
3013 st->print("@bci %d ",_handler_bci);
3014 }
3015 #endif
3016
3017 //=============================================================================
3018 //------------------------------Identity---------------------------------------
3019 // Check for CreateEx being Identity.
3020 Node* CreateExNode::Identity(PhaseGVN* phase) {
3021 if( phase->type(in(1)) == Type::TOP ) return in(1);
3022 if( phase->type(in(0)) == Type::TOP ) return in(0);
3023 if (phase->type(in(0)->in(0)) == Type::TOP) {
3024 assert(in(0)->is_CatchProj(), "control is CatchProj");
3025 return phase->C->top(); // dead code
3026 }
3027 // We only come from CatchProj, unless the CatchProj goes away.
3028 // If the CatchProj is optimized away, then we just carry the
3029 // exception oop through.
3030
3031 // CheckCastPPNode::Ideal() for inline types reuses the exception
3032 // paths of a call to perform an allocation: we can see a Phi here.
3033 if (in(1)->is_Phi()) {
3034 return this;
3035 }
3036 CallNode *call = in(1)->in(0)->as_Call();
3037
3038 return (in(0)->is_CatchProj() && in(0)->in(0)->is_Catch() &&
3039 in(0)->in(0)->in(1) == in(1)) ? this : call->in(TypeFunc::Parms);
3040 }
3041
3042 //=============================================================================
3043 //------------------------------Value------------------------------------------
3044 // Check for being unreachable.
3045 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
3046 if (!in(0) || in(0)->is_top()) return Type::TOP;
3047 return bottom_type();
3048 }
3049
3050 //------------------------------Ideal------------------------------------------
3051 // Check for no longer being part of a loop
3052 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3053 if (can_reshape && !in(0)->is_Region()) {
3054 // Dead code elimination can sometimes delete this projection so
3055 // if it's not there, there's nothing to do.
|