16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/shared/barrierSet.hpp"
27 #include "gc/shared/c2/barrierSetC2.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "memory/resourceArea.hpp"
30 #include "oops/objArrayKlass.hpp"
31 #include "opto/addnode.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/cfgnode.hpp"
34 #include "opto/connode.hpp"
35 #include "opto/convertnode.hpp"
36 #include "opto/loopnode.hpp"
37 #include "opto/machnode.hpp"
38 #include "opto/movenode.hpp"
39 #include "opto/narrowptrnode.hpp"
40 #include "opto/mulnode.hpp"
41 #include "opto/phaseX.hpp"
42 #include "opto/regalloc.hpp"
43 #include "opto/regmask.hpp"
44 #include "opto/runtime.hpp"
45 #include "opto/subnode.hpp"
46 #include "opto/vectornode.hpp"
47 #include "utilities/vmError.hpp"
48
49 // Portions of code courtesy of Clifford Click
50
51 // Optimization - Graph Style
52
53 //=============================================================================
54 //------------------------------Value------------------------------------------
55 // Compute the type of the RegionNode.
501 if (left_path == nullptr || right_path == nullptr) {
502 return false;
503 }
504 Node* diamond_if = left_path->in(0);
505 if (diamond_if == nullptr || !diamond_if->is_If() || diamond_if != right_path->in(0)) {
506 // Not an IfNode merging a diamond or TOP.
507 return false;
508 }
509
510 // Check for a proper bool/cmp
511 const Node* bol = diamond_if->in(1);
512 if (!bol->is_Bool()) {
513 return false;
514 }
515 const Node* cmp = bol->in(1);
516 if (!cmp->is_Cmp()) {
517 return false;
518 }
519 return true;
520 }
521 //------------------------------Ideal------------------------------------------
522 // Return a node which is more "ideal" than the current node. Must preserve
523 // the CFG, but we can still strip out dead paths.
524 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
525 if( !can_reshape && !in(0) ) return nullptr; // Already degraded to a Copy
526 assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
527
528 // Check for RegionNode with no Phi users and both inputs come from either
529 // arm of the same IF. If found, then the control-flow split is useless.
530 bool has_phis = false;
531 if (can_reshape) { // Need DU info to check for Phi users
532 try_clean_mem_phis(phase->is_IterGVN());
533 has_phis = (has_phi() != nullptr); // Cache result
534
535 if (!has_phis) { // No Phi users? Nothing merging?
536 for (uint i = 1; i < req()-1; i++) {
537 Node *if1 = in(i);
538 if( !if1 ) continue;
539 Node *iff = if1->in(0);
540 if( !iff || !iff->is_If() ) continue;
947 if (iff1 == iff2) {
948 igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
949 igvn->replace_input_of(region, idx1, iff1->in(0));
950 igvn->replace_input_of(region, idx2, igvn->C->top());
951 return (region == this); // Remove useless if (both projections map to the same control/value)
952 }
953 BoolNode* bol1 = iff1->in(1)->isa_Bool();
954 BoolNode* bol2 = iff2->in(1)->isa_Bool();
955 if (bol1 == nullptr || bol2 == nullptr) {
956 return false; // No bool inputs found
957 }
958 Node* cmp1 = bol1->in(1);
959 Node* cmp2 = bol2->in(1);
960 bool commute = false;
961 if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
962 return false; // No comparison
963 } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
964 cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
965 cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
966 cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
967 cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck()) {
968 // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
969 // SubTypeCheck is not commutative
970 return false;
971 } else if (cmp1 != cmp2) {
972 if (cmp1->in(1) == cmp2->in(2) &&
973 cmp1->in(2) == cmp2->in(1)) {
974 commute = true; // Same but swapped inputs, commute the test
975 } else {
976 return false; // Ifs are not comparing the same values
977 }
978 }
979 proj1 = proj1->other_if_proj();
980 proj2 = proj2->other_if_proj();
981 if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
982 proj2->unique_ctrl_out_or_null() == this) ||
983 (proj2->unique_ctrl_out_or_null() == iff1 &&
984 proj1->unique_ctrl_out_or_null() == this))) {
985 return false; // Ifs are not connected through other projs
986 }
987 // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged
1026 st->print("#reducible ");
1027 break;
1028 case RegionNode::LoopStatus::NeverIrreducibleEntry:
1029 break; // nothing
1030 }
1031 }
1032 #endif
1033
1034 // Find the one non-null required input. RegionNode only
1035 Node *Node::nonnull_req() const {
1036 assert( is_Region(), "" );
1037 for( uint i = 1; i < _cnt; i++ )
1038 if( in(i) )
1039 return in(i);
1040 ShouldNotReachHere();
1041 return nullptr;
1042 }
1043
1044
1045 //=============================================================================
1046 // note that these functions assume that the _adr_type field is flattened
1047 uint PhiNode::hash() const {
1048 const Type* at = _adr_type;
1049 return TypeNode::hash() + (at ? at->hash() : 0);
1050 }
1051 bool PhiNode::cmp( const Node &n ) const {
1052 return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
1053 }
1054 static inline
1055 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
1056 if (at == nullptr || at == TypePtr::BOTTOM) return at;
1057 return Compile::current()->alias_type(at)->adr_type();
1058 }
1059
1060 //----------------------------make---------------------------------------------
1061 // create a new phi with edges matching r and set (initially) to x
1062 PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
1063 uint preds = r->req(); // Number of predecessor paths
1064 assert(t != Type::MEMORY || at == flatten_phi_adr_type(at), "flatten at");
1065 PhiNode* p = new PhiNode(r, t, at);
1066 for (uint j = 1; j < preds; j++) {
1067 // Fill in all inputs, except those which the region does not yet have
1068 if (r->in(j) != nullptr)
1069 p->init_req(j, x);
1070 }
1071 return p;
1072 }
1073 PhiNode* PhiNode::make(Node* r, Node* x) {
1074 const Type* t = x->bottom_type();
1075 const TypePtr* at = nullptr;
1076 if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
1077 return make(r, x, t, at);
1078 }
1079 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
1080 const Type* t = x->bottom_type();
1081 const TypePtr* at = nullptr;
1082 if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
1083 return new PhiNode(r, t, at);
1084 }
1172 np->as_Phi()->verify_adr_type(visited, at);
1173 } else if (n->bottom_type() == Type::TOP
1174 || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
1175 // ignore top inputs
1176 } else {
1177 const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
1178 // recheck phi/non-phi consistency at leaves:
1179 assert((nat != nullptr) == (at != nullptr), "");
1180 assert(nat == at || nat == TypePtr::BOTTOM,
1181 "adr_type must be consistent at leaves of phi nest");
1182 }
1183 }
1184 }
1185
1186 // Verify a whole nest of phis rooted at this one.
void PhiNode::verify_adr_type(bool recursive) const {
  if (VMError::is_error_reported()) return; // muzzle asserts when debugging an error
  if (Node::in_dump()) return; // muzzle asserts when printing

  // Memory phis must carry an address type; all other phis must not.
  assert((_type == Type::MEMORY) == (_adr_type != nullptr), "adr_type for memory phis only");

  if (!VerifyAliases) return; // verify thoroughly only if requested

  // The stored address type must already be in canonical (flattened) form.
  assert(_adr_type == flatten_phi_adr_type(_adr_type),
         "Phi::adr_type must be pre-normalized");

  if (recursive) {
    // Walk the whole phi nest reachable from this phi and check that every
    // leaf memory node agrees with our slice (see the two-argument overload).
    VectorSet visited;
    verify_adr_type(visited, _adr_type);
  }
}
1203 #endif
1204
1205
1206 //------------------------------Value------------------------------------------
1207 // Compute the type of the PhiNode
1208 const Type* PhiNode::Value(PhaseGVN* phase) const {
1209 Node *r = in(0); // RegionNode
1210 if( !r ) // Copy or dead
1211 return in(1) ? phase->type(in(1)) : Type::TOP;
1390 assert(is_diamond_phi() > 0, "sanity");
1391 assert(req() == 3, "same as region");
1392 const Node* region = in(0);
1393 for (uint i = 1; i < 3; i++) {
1394 Node* phi_input = in(i);
1395 if (phi_input != nullptr && phi_input->is_MergeMem() && region->in(i)->outcnt() == 1) {
1396 // Nothing is control-dependent on path #i except the region itself.
1397 MergeMemNode* merge_mem = phi_input->as_MergeMem();
1398 uint j = 3 - i;
1399 Node* other_phi_input = in(j);
1400 if (other_phi_input != nullptr && other_phi_input == merge_mem->base_memory()) {
1401 // merge_mem is a successor memory to other_phi_input, and is not pinned inside the diamond, so push it out.
1402 // This will allow the diamond to collapse completely if there are no other phis left.
1403 igvn->replace_node(this, merge_mem);
1404 return true;
1405 }
1406 }
1407 }
1408 return false;
1409 }
1410 //----------------------------check_cmove_id-----------------------------------
1411 // Check for CMove'ing a constant after comparing against the constant.
1412 // Happens all the time now, since if we compare equality vs a constant in
1413 // the parser, we "know" the variable is constant on one path and we force
1414 // it. Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
1415 // conditional move: "x = (x==0)?0:x;". Yucko. This fix is slightly more
1416 // general in that we don't need constants. Since CMove's are only inserted
1417 // in very special circumstances, we do it here on generic Phi's.
1418 Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) {
1419 assert(true_path !=0, "only diamond shape graph expected");
1420
1421 // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1422 // phi->region->if_proj->ifnode->bool->cmp
1423 Node* region = in(0);
1424 Node* iff = region->in(1)->in(0);
1425 BoolNode* b = iff->in(1)->as_Bool();
1426 Node* cmp = b->in(1);
1427 Node* tval = in(true_path);
1428 Node* fval = in(3-true_path);
1429 Node* id = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b);
2010
2011 if (rc->in(0)->in(1) == nullptr || !rc->in(0)->in(1)->is_Bool()) { continue; }
2012 if (worklist.member(rc->in(0)->in(1))) {
2013 delay = true;
2014 break;
2015 }
2016
2017 if (rc->in(0)->in(1)->in(1) == nullptr || !rc->in(0)->in(1)->in(1)->is_Cmp()) { continue; }
2018 if (worklist.member(rc->in(0)->in(1)->in(1))) {
2019 delay = true;
2020 break;
2021 }
2022 }
2023
2024 if (delay) {
2025 worklist.push(this);
2026 }
2027 return delay;
2028 }
2029
2030 // If the Phi's Region is in an irreducible loop, and the Region
2031 // has had an input removed, but not yet transformed, it could be
2032 // that the Region (and this Phi) are not reachable from Root.
2033 // If we allow the Phi to collapse before the Region, this may lead
2034 // to dead-loop data. Wait for the Region to check for reachability,
2035 // and potentially remove the dead code.
2036 bool PhiNode::must_wait_for_region_in_irreducible_loop(PhaseGVN* phase) const {
2037 RegionNode* region = in(0)->as_Region();
2038 if (region->loop_status() == RegionNode::LoopStatus::MaybeIrreducibleEntry) {
2039 Node* top = phase->C->top();
2040 for (uint j = 1; j < req(); j++) {
2041 Node* rc = region->in(j); // for each control input
2042 if (rc == nullptr || phase->type(rc) == Type::TOP) {
2043 // Region is missing a control input
2044 Node* n = in(j);
2045 if (n != nullptr && n != top) {
2046 // Phi still has its input, so region just lost its input
2047 return true;
2048 }
2049 }
2346 for (uint i = 1; i < req(); i++) {
2347 offset->init_req(i, in(i)->in(AddPNode::Offset));
2348 }
2349 phase->is_IterGVN()->register_new_node_with_optimizer(offset);
2350 }
2351 return new AddPNode(base, address, offset);
2352 }
2353 }
2354 }
2355
2356 // Split phis through memory merges, so that the memory merges will go away.
2357 // Piggy-back this transformation on the search for a unique input....
2358 // It will be as if the merged memory is the unique value of the phi.
2359 // (Do not attempt this optimization unless parsing is complete.
2360 // It would make the parser's memory-merge logic sick.)
2361 // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
2362 if (progress == nullptr && can_reshape && type() == Type::MEMORY) {
2363 // see if this phi should be sliced
2364 uint merge_width = 0;
2365 bool saw_self = false;
2366 for( uint i=1; i<req(); ++i ) {// For all paths in
2367 Node *ii = in(i);
2368 // TOP inputs should not be counted as safe inputs because if the
2369 // Phi references itself through all other inputs then splitting the
2370 // Phi through memory merges would create dead loop at later stage.
2371 if (ii == top) {
2372 return nullptr; // Delay optimization until graph is cleaned.
2373 }
2374 if (ii->is_MergeMem()) {
2375 MergeMemNode* n = ii->as_MergeMem();
2376 merge_width = MAX2(merge_width, n->req());
2377 saw_self = saw_self || (n->base_memory() == this);
2378 }
2379 }
2380
2381 // This restriction is temporarily necessary to ensure termination:
2382 if (!saw_self && adr_type() == TypePtr::BOTTOM) merge_width = 0;
2383
2384 if (merge_width > Compile::AliasIdxRaw) {
2385 // found at least one non-empty MergeMem
2386 const TypePtr* at = adr_type();
2387 if (at != TypePtr::BOTTOM) {
2388 // Patch the existing phi to select an input from the merge:
2389 // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2390 // Phi:AT1(...m1...)
2391 int alias_idx = phase->C->get_alias_index(at);
2392 for (uint i=1; i<req(); ++i) {
2393 Node *ii = in(i);
2394 if (ii->is_MergeMem()) {
2395 MergeMemNode* n = ii->as_MergeMem();
2396 // compress paths and change unreachable cycles to TOP
2397 // If not, we can update the input infinitely along a MergeMem cycle
2398 // Equivalent code is in MemNode::Ideal_common
2399 Node *m = phase->transform(n);
2400 if (outcnt() == 0) { // Above transform() may kill us!
2401 return top;
2402 }
2433 if (!saw_safe_input) {
2434 // There is a dead loop: All inputs are either dead or reference back to this phi
2435 return top;
2436 }
2437
2438 // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
2439 // MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
2440 PhaseIterGVN* igvn = phase->is_IterGVN();
2441 assert(igvn != nullptr, "sanity check");
2442 Node* hook = new Node(1);
2443 PhiNode* new_base = (PhiNode*) clone();
2444 // Must eagerly register phis, since they participate in loops.
2445 igvn->register_new_node_with_optimizer(new_base);
2446 hook->add_req(new_base);
2447
2448 MergeMemNode* result = MergeMemNode::make(new_base);
2449 for (uint i = 1; i < req(); ++i) {
2450 Node *ii = in(i);
2451 if (ii->is_MergeMem()) {
2452 MergeMemNode* n = ii->as_MergeMem();
2453 for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
2454 // If we have not seen this slice yet, make a phi for it.
2455 bool made_new_phi = false;
2456 if (mms.is_empty()) {
2457 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
2458 made_new_phi = true;
2459 igvn->register_new_node_with_optimizer(new_phi);
2460 hook->add_req(new_phi);
2461 mms.set_memory(new_phi);
2462 }
2463 Node* phi = mms.memory();
2464 assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
2465 phi->set_req(i, mms.memory2());
2466 }
2467 }
2468 }
2469 // Distribute all self-loops.
2470 { // (Extra braces to hide mms.)
2471 for (MergeMemStream mms(result); mms.next_non_empty(); ) {
2472 Node* phi = mms.memory();
2551 if (is_decodeN) {
2552 new_ii = new EncodePNode(ii, narrow_t);
2553 } else {
2554 new_ii = new EncodePKlassNode(ii, narrow_t);
2555 }
2556 igvn->register_new_node_with_optimizer(new_ii);
2557 }
2558 }
2559 new_phi->set_req(i, new_ii);
2560 }
2561 igvn->register_new_node_with_optimizer(new_phi, this);
2562 if (is_decodeN) {
2563 progress = new DecodeNNode(new_phi, bottom_type());
2564 } else {
2565 progress = new DecodeNKlassNode(new_phi, bottom_type());
2566 }
2567 }
2568 }
2569 #endif
2570
2571 // Try to convert a Phi with two duplicated convert nodes into a phi of the pre-conversion type and the convert node
2572 // proceeding the phi, to de-duplicate the convert node and compact the IR.
2573 if (can_reshape && progress == nullptr) {
2574 ConvertNode* convert = in(1)->isa_Convert();
2575 if (convert != nullptr) {
2576 int conv_op = convert->Opcode();
2577 bool ok = true;
2578
2579 // Check the rest of the inputs
2580 for (uint i = 2; i < req(); i++) {
2581 // Make sure that all inputs are of the same type of convert node
2582 if (in(i)->Opcode() != conv_op) {
2583 ok = false;
2584 break;
2585 }
2586 }
2587
2588 if (ok) {
2589 // Find the local bottom type to set as the type of the phi
2590 const Type* source_type = Type::get_const_basic_type(convert->in_type()->basic_type());
2594 // Set inputs to the new phi be the inputs of the convert
2595 for (uint i = 1; i < req(); i++) {
2596 newphi->init_req(i, in(i)->in(1));
2597 }
2598
2599 phase->is_IterGVN()->register_new_node_with_optimizer(newphi, this);
2600
2601 return ConvertNode::create_convert(get_convert_type(convert, source_type), get_convert_type(convert, dest_type), newphi);
2602 }
2603 }
2604 }
2605
2606 // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
2607 if (EnableVectorReboxing && can_reshape && progress == nullptr && type()->isa_oopptr()) {
2608 progress = merge_through_phi(this, phase->is_IterGVN());
2609 }
2610
2611 return progress; // Return any progress
2612 }
2613
2614 static int compare_types(const Type* const& e1, const Type* const& e2) {
2615 return (intptr_t)e1 - (intptr_t)e2;
2616 }
2617
2618 // Collect types at casts that are going to be eliminated at that Phi and store them in a TypeTuple.
2619 // Sort the types using an arbitrary order so a list of some types always hashes to the same TypeTuple (and TypeTuple
2620 // pointer comparison is enough to tell if 2 list of types are the same or not)
2621 const TypeTuple* PhiNode::collect_types(PhaseGVN* phase) const {
2622 const Node* region = in(0);
2623 const Type* phi_type = bottom_type();
2624 ResourceMark rm;
2625 GrowableArray<const Type*> types;
2626 for (uint i = 1; i < req(); i++) {
2627 if (region->in(i) == nullptr || phase->type(region->in(i)) == Type::TOP) {
2628 continue;
2629 }
2630 Node* in = Node::in(i);
2631 const Type* t = phase->type(in);
2632 if (in == nullptr || in == this || t == Type::TOP) {
2633 continue;
2976 #ifndef PRODUCT
void CatchProjNode::dump_spec(outputStream *st) const {
  ProjNode::dump_spec(st);
  // Also show the bytecode index of the exception handler this
  // projection transfers control to.
  st->print("@bci %d ",_handler_bci);
}
2981 #endif
2982
2983 //=============================================================================
2984 //------------------------------Identity---------------------------------------
2985 // Check for CreateEx being Identity.
Node* CreateExNode::Identity(PhaseGVN* phase) {
  // in(1) is the exception source (a call), in(0) the incoming control.
  if( phase->type(in(1)) == Type::TOP ) return in(1); // dead exception input
  if( phase->type(in(0)) == Type::TOP ) return in(0); // dead control input
  if (phase->type(in(0)->in(0)) == Type::TOP) {
    assert(in(0)->is_CatchProj(), "control is CatchProj");
    return phase->C->top(); // dead code
  }
  // We only come from CatchProj, unless the CatchProj goes away.
  // If the CatchProj is optimized away, then we just carry the
  // exception oop through.
  CallNode *call = in(1)->in(0)->as_Call();

  // Stay put while the CatchProj -> Catch -> call shape is intact;
  // otherwise collapse to the call's exception oop (first parm slot).
  return (in(0)->is_CatchProj() && in(0)->in(0)->is_Catch() &&
          in(0)->in(0)->in(1) == in(1)) ? this : call->in(TypeFunc::Parms);
}
3001
3002 //=============================================================================
3003 //------------------------------Value------------------------------------------
3004 // Check for being unreachable.
3005 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
3006 if (!in(0) || in(0)->is_top()) return Type::TOP;
3007 return bottom_type();
3008 }
3009
3010 //------------------------------Ideal------------------------------------------
3011 // Check for no longer being part of a loop
3012 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3013 if (can_reshape && !in(0)->is_Region()) {
3014 // Dead code elimination can sometimes delete this projection so
3015 // if it's not there, there's nothing to do.
|
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/shared/barrierSet.hpp"
27 #include "gc/shared/c2/barrierSetC2.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "memory/resourceArea.hpp"
30 #include "oops/objArrayKlass.hpp"
31 #include "opto/addnode.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/cfgnode.hpp"
34 #include "opto/connode.hpp"
35 #include "opto/convertnode.hpp"
36 #include "opto/inlinetypenode.hpp"
37 #include "opto/loopnode.hpp"
38 #include "opto/machnode.hpp"
39 #include "opto/movenode.hpp"
40 #include "opto/narrowptrnode.hpp"
41 #include "opto/mulnode.hpp"
42 #include "opto/phaseX.hpp"
43 #include "opto/regalloc.hpp"
44 #include "opto/regmask.hpp"
45 #include "opto/runtime.hpp"
46 #include "opto/subnode.hpp"
47 #include "opto/vectornode.hpp"
48 #include "utilities/vmError.hpp"
49
50 // Portions of code courtesy of Clifford Click
51
52 // Optimization - Graph Style
53
54 //=============================================================================
55 //------------------------------Value------------------------------------------
56 // Compute the type of the RegionNode.
502 if (left_path == nullptr || right_path == nullptr) {
503 return false;
504 }
505 Node* diamond_if = left_path->in(0);
506 if (diamond_if == nullptr || !diamond_if->is_If() || diamond_if != right_path->in(0)) {
507 // Not an IfNode merging a diamond or TOP.
508 return false;
509 }
510
511 // Check for a proper bool/cmp
512 const Node* bol = diamond_if->in(1);
513 if (!bol->is_Bool()) {
514 return false;
515 }
516 const Node* cmp = bol->in(1);
517 if (!cmp->is_Cmp()) {
518 return false;
519 }
520 return true;
521 }
522
523 //------------------------------Ideal------------------------------------------
524 // Return a node which is more "ideal" than the current node. Must preserve
525 // the CFG, but we can still strip out dead paths.
526 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
527 if( !can_reshape && !in(0) ) return nullptr; // Already degraded to a Copy
528 assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
529
530 // Check for RegionNode with no Phi users and both inputs come from either
531 // arm of the same IF. If found, then the control-flow split is useless.
532 bool has_phis = false;
533 if (can_reshape) { // Need DU info to check for Phi users
534 try_clean_mem_phis(phase->is_IterGVN());
535 has_phis = (has_phi() != nullptr); // Cache result
536
537 if (!has_phis) { // No Phi users? Nothing merging?
538 for (uint i = 1; i < req()-1; i++) {
539 Node *if1 = in(i);
540 if( !if1 ) continue;
541 Node *iff = if1->in(0);
542 if( !iff || !iff->is_If() ) continue;
949 if (iff1 == iff2) {
950 igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
951 igvn->replace_input_of(region, idx1, iff1->in(0));
952 igvn->replace_input_of(region, idx2, igvn->C->top());
953 return (region == this); // Remove useless if (both projections map to the same control/value)
954 }
955 BoolNode* bol1 = iff1->in(1)->isa_Bool();
956 BoolNode* bol2 = iff2->in(1)->isa_Bool();
957 if (bol1 == nullptr || bol2 == nullptr) {
958 return false; // No bool inputs found
959 }
960 Node* cmp1 = bol1->in(1);
961 Node* cmp2 = bol2->in(1);
962 bool commute = false;
963 if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
964 return false; // No comparison
965 } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
966 cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
967 cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
968 cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
969 cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck() ||
970 cmp1->is_FlatArrayCheck() || cmp2->is_FlatArrayCheck()) {
971 // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
972 // SubTypeCheck is not commutative
973 return false;
974 } else if (cmp1 != cmp2) {
975 if (cmp1->in(1) == cmp2->in(2) &&
976 cmp1->in(2) == cmp2->in(1)) {
977 commute = true; // Same but swapped inputs, commute the test
978 } else {
979 return false; // Ifs are not comparing the same values
980 }
981 }
982 proj1 = proj1->other_if_proj();
983 proj2 = proj2->other_if_proj();
984 if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
985 proj2->unique_ctrl_out_or_null() == this) ||
986 (proj2->unique_ctrl_out_or_null() == iff1 &&
987 proj1->unique_ctrl_out_or_null() == this))) {
988 return false; // Ifs are not connected through other projs
989 }
990 // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged
1029 st->print("#reducible ");
1030 break;
1031 case RegionNode::LoopStatus::NeverIrreducibleEntry:
1032 break; // nothing
1033 }
1034 }
1035 #endif
1036
1037 // Find the one non-null required input. RegionNode only
1038 Node *Node::nonnull_req() const {
1039 assert( is_Region(), "" );
1040 for( uint i = 1; i < _cnt; i++ )
1041 if( in(i) )
1042 return in(i);
1043 ShouldNotReachHere();
1044 return nullptr;
1045 }
1046
1047
1048 //=============================================================================
1049 // note that these functions assume that the _adr_type field is flat
1050 uint PhiNode::hash() const {
1051 const Type* at = _adr_type;
1052 return TypeNode::hash() + (at ? at->hash() : 0);
1053 }
1054 bool PhiNode::cmp( const Node &n ) const {
1055 return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
1056 }
1057 static inline
1058 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
1059 if (at == nullptr || at == TypePtr::BOTTOM) return at;
1060 return Compile::current()->alias_type(at)->adr_type();
1061 }
1062
1063 //----------------------------make---------------------------------------------
1064 // create a new phi with edges matching r and set (initially) to x
1065 PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
1066 uint preds = r->req(); // Number of predecessor paths
1067 assert(t != Type::MEMORY || at == flatten_phi_adr_type(at) || (flatten_phi_adr_type(at) == TypeAryPtr::INLINES && Compile::current()->flat_accesses_share_alias()), "flatten at");
1068 PhiNode* p = new PhiNode(r, t, at);
1069 for (uint j = 1; j < preds; j++) {
1070 // Fill in all inputs, except those which the region does not yet have
1071 if (r->in(j) != nullptr)
1072 p->init_req(j, x);
1073 }
1074 return p;
1075 }
1076 PhiNode* PhiNode::make(Node* r, Node* x) {
1077 const Type* t = x->bottom_type();
1078 const TypePtr* at = nullptr;
1079 if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
1080 return make(r, x, t, at);
1081 }
1082 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
1083 const Type* t = x->bottom_type();
1084 const TypePtr* at = nullptr;
1085 if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
1086 return new PhiNode(r, t, at);
1087 }
1175 np->as_Phi()->verify_adr_type(visited, at);
1176 } else if (n->bottom_type() == Type::TOP
1177 || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
1178 // ignore top inputs
1179 } else {
1180 const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
1181 // recheck phi/non-phi consistency at leaves:
1182 assert((nat != nullptr) == (at != nullptr), "");
1183 assert(nat == at || nat == TypePtr::BOTTOM,
1184 "adr_type must be consistent at leaves of phi nest");
1185 }
1186 }
1187 }
1188
1189 // Verify a whole nest of phis rooted at this one.
void PhiNode::verify_adr_type(bool recursive) const {
  if (VMError::is_error_reported()) return; // muzzle asserts when debugging an error
  if (Node::in_dump()) return; // muzzle asserts when printing

  // Memory phis must carry an address type; all other phis must not.
  assert((_type == Type::MEMORY) == (_adr_type != nullptr), "adr_type for memory phis only");
  // Flat array element shouldn't get their own memory slice until flat_accesses_share_alias is cleared.
  // It could be the graph has no loads/stores and flat_accesses_share_alias is never cleared. EA could still
  // creates per element Phis but that wouldn't be a problem as there are no memory accesses for that array.
  assert(_adr_type == nullptr || _adr_type->isa_aryptr() == nullptr ||
         _adr_type->is_aryptr()->is_known_instance() ||
         !_adr_type->is_aryptr()->is_flat() ||
         !Compile::current()->flat_accesses_share_alias() ||
         _adr_type == TypeAryPtr::INLINES, "flat array element shouldn't get its own slice yet");

  if (!VerifyAliases) return; // verify thoroughly only if requested

  // The stored address type must already be in canonical (flattened) form.
  assert(_adr_type == flatten_phi_adr_type(_adr_type),
         "Phi::adr_type must be pre-normalized");

  if (recursive) {
    // Walk the whole phi nest reachable from this phi and check that every
    // leaf memory node agrees with our slice (see the two-argument overload).
    VectorSet visited;
    verify_adr_type(visited, _adr_type);
  }
}
1214 #endif
1215
1216
1217 //------------------------------Value------------------------------------------
1218 // Compute the type of the PhiNode
1219 const Type* PhiNode::Value(PhaseGVN* phase) const {
1220 Node *r = in(0); // RegionNode
1221 if( !r ) // Copy or dead
1222 return in(1) ? phase->type(in(1)) : Type::TOP;
1401 assert(is_diamond_phi() > 0, "sanity");
1402 assert(req() == 3, "same as region");
1403 const Node* region = in(0);
1404 for (uint i = 1; i < 3; i++) {
1405 Node* phi_input = in(i);
1406 if (phi_input != nullptr && phi_input->is_MergeMem() && region->in(i)->outcnt() == 1) {
1407 // Nothing is control-dependent on path #i except the region itself.
1408 MergeMemNode* merge_mem = phi_input->as_MergeMem();
1409 uint j = 3 - i;
1410 Node* other_phi_input = in(j);
1411 if (other_phi_input != nullptr && other_phi_input == merge_mem->base_memory()) {
1412 // merge_mem is a successor memory to other_phi_input, and is not pinned inside the diamond, so push it out.
1413 // This will allow the diamond to collapse completely if there are no other phis left.
1414 igvn->replace_node(this, merge_mem);
1415 return true;
1416 }
1417 }
1418 }
1419 return false;
1420 }
1421
1422 //----------------------------check_cmove_id-----------------------------------
1423 // Check for CMove'ing a constant after comparing against the constant.
1424 // Happens all the time now, since if we compare equality vs a constant in
1425 // the parser, we "know" the variable is constant on one path and we force
1426 // it. Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
1427 // conditional move: "x = (x==0)?0:x;". Yucko. This fix is slightly more
1428 // general in that we don't need constants. Since CMove's are only inserted
1429 // in very special circumstances, we do it here on generic Phi's.
1430 Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) {
1431 assert(true_path !=0, "only diamond shape graph expected");
1432
1433 // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1434 // phi->region->if_proj->ifnode->bool->cmp
1435 Node* region = in(0);
1436 Node* iff = region->in(1)->in(0);
1437 BoolNode* b = iff->in(1)->as_Bool();
1438 Node* cmp = b->in(1);
1439 Node* tval = in(true_path);
1440 Node* fval = in(3-true_path);
1441 Node* id = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b);
2022
2023 if (rc->in(0)->in(1) == nullptr || !rc->in(0)->in(1)->is_Bool()) { continue; }
2024 if (worklist.member(rc->in(0)->in(1))) {
2025 delay = true;
2026 break;
2027 }
2028
2029 if (rc->in(0)->in(1)->in(1) == nullptr || !rc->in(0)->in(1)->in(1)->is_Cmp()) { continue; }
2030 if (worklist.member(rc->in(0)->in(1)->in(1))) {
2031 delay = true;
2032 break;
2033 }
2034 }
2035
2036 if (delay) {
2037 worklist.push(this);
2038 }
2039 return delay;
2040 }
2041
// Push inline type input nodes (and null) down through the phi recursively (can handle data loops).
// Replaces this Phi by a fresh InlineTypeNode rooted at the same region (via clone_with_phis),
// then merges each input of this Phi into it. Each input is either:
//   - a zero/null constant: replaced by a null InlineTypeNode of 'inline_klass',
//   - another Phi: handled by recursing (IGVN only),
//   - an InlineTypeNode, possibly wrapped in ConstraintCasts that are re-applied to its oop.
// The caller (can_push_inline_types_down) has already verified that all transitive inputs
// fall into one of these categories and agree on 'inline_klass'.
InlineTypeNode* PhiNode::push_inline_types_down(PhaseGVN* phase, bool can_reshape, ciInlineKlass* inline_klass) {
  assert(inline_klass != nullptr, "must be");
  // Start from a null inline type and clone it with phis at this Phi's region.
  InlineTypeNode* vt = InlineTypeNode::make_null(*phase, inline_klass, /* transform = */ false)->clone_with_phis(phase, in(0), nullptr, !_type->maybe_null());
  if (can_reshape) {
    // Replace phi right away to be able to use the inline
    // type node when reaching the phi again through data loops.
    PhaseIterGVN* igvn = phase->is_IterGVN();
    for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
      Node* u = fast_out(i);
      igvn->rehash_node_delayed(u);
      // replace_edge may remove several out-edges at once; shrink the
      // iteration bound by that count and re-check the current slot.
      imax -= u->replace_edge(this, vt);
      --i;
    }
    igvn->rehash_node_delayed(this);
    assert(outcnt() == 0, "should be dead now");
  }
  ResourceMark rm;
  Node_List casts;
  for (uint i = 1; i < req(); ++i) {
    Node* n = in(i);
    // Strip any chain of ConstraintCasts, remembering them so they can be
    // pushed through to the oop input below.
    while (n->is_ConstraintCast()) {
      casts.push(n);
      n = n->in(1);
    }
    if (phase->type(n)->is_zero_type()) {
      // Null input: substitute a null inline type of the expected klass.
      n = InlineTypeNode::make_null(*phase, inline_klass);
    } else if (n->is_Phi()) {
      assert(can_reshape, "can only handle phis during IGVN");
      n = phase->transform(n->as_Phi()->push_inline_types_down(phase, can_reshape, inline_klass));
    }
    while (casts.size() != 0) {
      // Push the cast(s) through the InlineTypeNode
      // TODO 8325106 Can we avoid cloning?
      Node* cast = casts.pop()->clone();
      cast->set_req_X(1, n->as_InlineType()->get_oop(), phase);
      n = n->clone();
      n->as_InlineType()->set_oop(*phase, phase->transform(cast));
      n = phase->transform(n);
    }
    bool transform = !can_reshape && (i == (req()-1)); // Transform phis on last merge
    vt->merge_with(phase, n->as_InlineType(), i, transform);
  }
  return vt;
}
2087
2088 // If the Phi's Region is in an irreducible loop, and the Region
2089 // has had an input removed, but not yet transformed, it could be
2090 // that the Region (and this Phi) are not reachable from Root.
2091 // If we allow the Phi to collapse before the Region, this may lead
2092 // to dead-loop data. Wait for the Region to check for reachability,
2093 // and potentially remove the dead code.
2094 bool PhiNode::must_wait_for_region_in_irreducible_loop(PhaseGVN* phase) const {
2095 RegionNode* region = in(0)->as_Region();
2096 if (region->loop_status() == RegionNode::LoopStatus::MaybeIrreducibleEntry) {
2097 Node* top = phase->C->top();
2098 for (uint j = 1; j < req(); j++) {
2099 Node* rc = region->in(j); // for each control input
2100 if (rc == nullptr || phase->type(rc) == Type::TOP) {
2101 // Region is missing a control input
2102 Node* n = in(j);
2103 if (n != nullptr && n != top) {
2104 // Phi still has its input, so region just lost its input
2105 return true;
2106 }
2107 }
2404 for (uint i = 1; i < req(); i++) {
2405 offset->init_req(i, in(i)->in(AddPNode::Offset));
2406 }
2407 phase->is_IterGVN()->register_new_node_with_optimizer(offset);
2408 }
2409 return new AddPNode(base, address, offset);
2410 }
2411 }
2412 }
2413
2414 // Split phis through memory merges, so that the memory merges will go away.
2415 // Piggy-back this transformation on the search for a unique input....
2416 // It will be as if the merged memory is the unique value of the phi.
2417 // (Do not attempt this optimization unless parsing is complete.
2418 // It would make the parser's memory-merge logic sick.)
2419 // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
2420 if (progress == nullptr && can_reshape && type() == Type::MEMORY) {
2421 // see if this phi should be sliced
2422 uint merge_width = 0;
2423 bool saw_self = false;
2424 // TODO revisit this with JDK-8247216
2425 bool mergemem_only = true;
2426 for( uint i=1; i<req(); ++i ) {// For all paths in
2427 Node *ii = in(i);
2428 // TOP inputs should not be counted as safe inputs because if the
2429 // Phi references itself through all other inputs then splitting the
2430 // Phi through memory merges would create dead loop at later stage.
2431 if (ii == top) {
2432 return nullptr; // Delay optimization until graph is cleaned.
2433 }
2434 if (ii->is_MergeMem()) {
2435 MergeMemNode* n = ii->as_MergeMem();
2436 merge_width = MAX2(merge_width, n->req());
2437 saw_self = saw_self || (n->base_memory() == this);
2438 } else {
2439 mergemem_only = false;
2440 }
2441 }
2442
2443 // This restriction is temporarily necessary to ensure termination:
2444 if (!mergemem_only && !saw_self && adr_type() == TypePtr::BOTTOM) merge_width = 0;
2445
2446 if (merge_width > Compile::AliasIdxRaw) {
2447 // found at least one non-empty MergeMem
2448 const TypePtr* at = adr_type();
2449 if (at != TypePtr::BOTTOM) {
2450 // Patch the existing phi to select an input from the merge:
2451 // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2452 // Phi:AT1(...m1...)
2453 int alias_idx = phase->C->get_alias_index(at);
2454 for (uint i=1; i<req(); ++i) {
2455 Node *ii = in(i);
2456 if (ii->is_MergeMem()) {
2457 MergeMemNode* n = ii->as_MergeMem();
2458 // compress paths and change unreachable cycles to TOP
2459 // If not, we can update the input infinitely along a MergeMem cycle
2460 // Equivalent code is in MemNode::Ideal_common
2461 Node *m = phase->transform(n);
2462 if (outcnt() == 0) { // Above transform() may kill us!
2463 return top;
2464 }
2495 if (!saw_safe_input) {
2496 // There is a dead loop: All inputs are either dead or reference back to this phi
2497 return top;
2498 }
2499
2500 // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
2501 // MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
2502 PhaseIterGVN* igvn = phase->is_IterGVN();
2503 assert(igvn != nullptr, "sanity check");
2504 Node* hook = new Node(1);
2505 PhiNode* new_base = (PhiNode*) clone();
2506 // Must eagerly register phis, since they participate in loops.
2507 igvn->register_new_node_with_optimizer(new_base);
2508 hook->add_req(new_base);
2509
2510 MergeMemNode* result = MergeMemNode::make(new_base);
2511 for (uint i = 1; i < req(); ++i) {
2512 Node *ii = in(i);
2513 if (ii->is_MergeMem()) {
2514 MergeMemNode* n = ii->as_MergeMem();
2515 if (igvn) {
2516 // TODO revisit this with JDK-8247216
2517 // Put 'n' on the worklist because it might be modified by MergeMemStream::iteration_setup
2518 igvn->_worklist.push(n);
2519 }
2520 for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
2521 // If we have not seen this slice yet, make a phi for it.
2522 bool made_new_phi = false;
2523 if (mms.is_empty()) {
2524 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
2525 made_new_phi = true;
2526 igvn->register_new_node_with_optimizer(new_phi);
2527 hook->add_req(new_phi);
2528 mms.set_memory(new_phi);
2529 }
2530 Node* phi = mms.memory();
2531 assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
2532 phi->set_req(i, mms.memory2());
2533 }
2534 }
2535 }
2536 // Distribute all self-loops.
2537 { // (Extra braces to hide mms.)
2538 for (MergeMemStream mms(result); mms.next_non_empty(); ) {
2539 Node* phi = mms.memory();
2618 if (is_decodeN) {
2619 new_ii = new EncodePNode(ii, narrow_t);
2620 } else {
2621 new_ii = new EncodePKlassNode(ii, narrow_t);
2622 }
2623 igvn->register_new_node_with_optimizer(new_ii);
2624 }
2625 }
2626 new_phi->set_req(i, new_ii);
2627 }
2628 igvn->register_new_node_with_optimizer(new_phi, this);
2629 if (is_decodeN) {
2630 progress = new DecodeNNode(new_phi, bottom_type());
2631 } else {
2632 progress = new DecodeNKlassNode(new_phi, bottom_type());
2633 }
2634 }
2635 }
2636 #endif
2637
2638 Node* inline_type = try_push_inline_types_down(phase, can_reshape);
2639 if (inline_type != this) {
2640 return inline_type;
2641 }
2642
2643 // Try to convert a Phi with two duplicated convert nodes into a phi of the pre-conversion type and the convert node
2644 // proceeding the phi, to de-duplicate the convert node and compact the IR.
2645 if (can_reshape && progress == nullptr) {
2646 ConvertNode* convert = in(1)->isa_Convert();
2647 if (convert != nullptr) {
2648 int conv_op = convert->Opcode();
2649 bool ok = true;
2650
2651 // Check the rest of the inputs
2652 for (uint i = 2; i < req(); i++) {
2653 // Make sure that all inputs are of the same type of convert node
2654 if (in(i)->Opcode() != conv_op) {
2655 ok = false;
2656 break;
2657 }
2658 }
2659
2660 if (ok) {
2661 // Find the local bottom type to set as the type of the phi
2662 const Type* source_type = Type::get_const_basic_type(convert->in_type()->basic_type());
2666 // Set inputs to the new phi be the inputs of the convert
2667 for (uint i = 1; i < req(); i++) {
2668 newphi->init_req(i, in(i)->in(1));
2669 }
2670
2671 phase->is_IterGVN()->register_new_node_with_optimizer(newphi, this);
2672
2673 return ConvertNode::create_convert(get_convert_type(convert, source_type), get_convert_type(convert, dest_type), newphi);
2674 }
2675 }
2676 }
2677
2678 // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
2679 if (EnableVectorReboxing && can_reshape && progress == nullptr && type()->isa_oopptr()) {
2680 progress = merge_through_phi(this, phase->is_IterGVN());
2681 }
2682
2683 return progress; // Return any progress
2684 }
2685
2686 // Check recursively if inputs are either an inline type, constant null
2687 // or another Phi (including self references through data loops). If so,
2688 // push the inline types down through the phis to enable folding of loads.
2689 Node* PhiNode::try_push_inline_types_down(PhaseGVN* phase, const bool can_reshape) {
2690 if (!can_be_inline_type()) {
2691 return this;
2692 }
2693
2694 ciInlineKlass* inline_klass;
2695 if (can_push_inline_types_down(phase, can_reshape, inline_klass)) {
2696 assert(inline_klass != nullptr, "must be");
2697 return push_inline_types_down(phase, can_reshape, inline_klass);
2698 }
2699 return this;
2700 }
2701
// Check whether all inputs of this Phi (transitively, through nested Phis and
// ConstraintCasts) are either inline types of a single common klass, null
// constants, or further phis, i.e. whether push_inline_types_down() applies.
// On success, returns true and sets 'inline_klass' to the common inline klass;
// on failure, returns false and 'inline_klass' must not be relied upon.
bool PhiNode::can_push_inline_types_down(PhaseGVN* phase, const bool can_reshape, ciInlineKlass*& inline_klass) {
  if (req() <= 2) {
    // Dead phi.
    return false;
  }
  inline_klass = nullptr;

  // TODO 8302217 We need to prevent endless pushing through
  // Bail out if this phi already feeds an inline type node, or if all of its
  // uses are other phis (pushing would then just move the problem around).
  bool only_phi = (outcnt() != 0);
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* n = fast_out(i);
    if (n->is_InlineType() && n->in(1) == this) {
      return false;
    }
    if (!n->is_Phi()) {
      only_phi = false;
    }
  }
  if (only_phi) {
    return false;
  }

  ResourceMark rm;
  Unique_Node_List worklist;
  worklist.push(this);
  Node_List casts;

  // Walk this phi and every (pointer-typed) phi reachable through its inputs.
  // Unique_Node_List ensures data cycles (self references) terminate.
  for (uint next = 0; next < worklist.size(); next++) {
    Node* phi = worklist.at(next);
    for (uint i = 1; i < phi->req(); i++) {
      Node* n = phi->in(i);
      if (n == nullptr) {
        return false;
      }
      // Look through ConstraintCasts; remember them for the filter check below.
      while (n->is_ConstraintCast()) {
        if (n->in(0) != nullptr && n->in(0)->is_top()) {
          // Will die, don't optimize
          return false;
        }
        casts.push(n);
        n = n->in(1);
      }
      const Type* type = phase->type(n);
      if (n->is_InlineType() && (inline_klass == nullptr || inline_klass == type->inline_klass())) {
        // First inline type seen, or same klass as those seen before.
        inline_klass = type->inline_klass();
      } else if (n->is_Phi() && can_reshape && n->bottom_type()->isa_ptr()) {
        worklist.push(n);
      } else if (!type->is_zero_type()) {
        // Neither an inline type of the common klass, a nested phi, nor null.
        return false;
      }
    }
  }
  if (inline_klass == nullptr) {
    // Only nulls and phis were found; there is nothing to push down.
    return false;
  }

  // Check if cast nodes can be pushed through
  const Type* t = Type::get_const_type(inline_klass);
  while (casts.size() != 0 && t != nullptr) {
    Node* cast = casts.pop();
    if (t->filter(cast->bottom_type()) == Type::TOP) {
      // A recorded cast would die when applied to an oop of this klass.
      return false;
    }
  }

  return true;
}
2769
2770 static int compare_types(const Type* const& e1, const Type* const& e2) {
2771 return (intptr_t)e1 - (intptr_t)e2;
2772 }
2773
2774 // Collect types at casts that are going to be eliminated at that Phi and store them in a TypeTuple.
2775 // Sort the types using an arbitrary order so a list of some types always hashes to the same TypeTuple (and TypeTuple
2776 // pointer comparison is enough to tell if 2 list of types are the same or not)
2777 const TypeTuple* PhiNode::collect_types(PhaseGVN* phase) const {
2778 const Node* region = in(0);
2779 const Type* phi_type = bottom_type();
2780 ResourceMark rm;
2781 GrowableArray<const Type*> types;
2782 for (uint i = 1; i < req(); i++) {
2783 if (region->in(i) == nullptr || phase->type(region->in(i)) == Type::TOP) {
2784 continue;
2785 }
2786 Node* in = Node::in(i);
2787 const Type* t = phase->type(in);
2788 if (in == nullptr || in == this || t == Type::TOP) {
2789 continue;
3132 #ifndef PRODUCT
// Debug printing: standard projection output followed by the handler bci.
void CatchProjNode::dump_spec(outputStream *st) const {
  ProjNode::dump_spec(st);
  st->print("@bci %d ",_handler_bci);
}
3137 #endif
3138
3139 //=============================================================================
3140 //------------------------------Identity---------------------------------------
3141 // Check for CreateEx being Identity.
Node* CreateExNode::Identity(PhaseGVN* phase) {
  // Propagate deadness of the exception oop (in(1)) or the control input (in(0)).
  if( phase->type(in(1)) == Type::TOP ) return in(1);
  if( phase->type(in(0)) == Type::TOP ) return in(0);
  if (phase->type(in(0)->in(0)) == Type::TOP) {
    assert(in(0)->is_CatchProj(), "control is CatchProj");
    return phase->C->top(); // dead code
  }
  // We only come from CatchProj, unless the CatchProj goes away.
  // If the CatchProj is optimized away, then we just carry the
  // exception oop through.

  // CheckCastPPNode::Ideal() for inline types reuses the exception
  // paths of a call to perform an allocation: we can see a Phi here.
  if (in(1)->is_Phi()) {
    return this;
  }
  // Otherwise the oop comes from a projection of a call; fetch that call.
  CallNode *call = in(1)->in(0)->as_Call();

  // Keep this node while the CatchProj shape (CatchProj -> Catch -> the same
  // call projection as in(1)) is intact; once it collapses, forward the call's
  // first parameter (the exception oop) directly.
  return (in(0)->is_CatchProj() && in(0)->in(0)->is_Catch() &&
          in(0)->in(0)->in(1) == in(1)) ? this : call->in(TypeFunc::Parms);
}
3163
3164 //=============================================================================
3165 //------------------------------Value------------------------------------------
3166 // Check for being unreachable.
3167 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
3168 if (!in(0) || in(0)->is_top()) return Type::TOP;
3169 return bottom_type();
3170 }
3171
3172 //------------------------------Ideal------------------------------------------
3173 // Check for no longer being part of a loop
3174 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3175 if (can_reshape && !in(0)->is_Region()) {
3176 // Dead code elimination can sometimes delete this projection so
3177 // if it's not there, there's nothing to do.
|