 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/regalloc.hpp"
#include "opto/regmask.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/vectornode.hpp"
#include "utilities/vmError.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
//------------------------------Value------------------------------------------
// Compute the type of the RegionNode.
// ...

  if (left_path == nullptr || right_path == nullptr) {
    return false;
  }
  Node* diamond_if = left_path->in(0);
  if (diamond_if == nullptr || !diamond_if->is_If() || diamond_if != right_path->in(0)) {
    // Not an IfNode merging a diamond or TOP.
    return false;
  }

  // Check for a proper bool/cmp
  const Node* bol = diamond_if->in(1);
  if (!bol->is_Bool()) {
    return false;
  }
  const Node* cmp = bol->in(1);
  if (!cmp->is_Cmp()) {
    return false;
  }
  return true;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Must preserve
// the CFG, but we can still strip out dead paths.
Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if( !can_reshape && !in(0) ) return nullptr; // Already degraded to a Copy
  assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");

  // Check for a RegionNode with no Phi users whose inputs all come from the
  // arms of the same If: such a control-flow split is useless.
  bool has_phis = false;
  if (can_reshape) {            // Need DU info to check for Phi users
    try_clean_mem_phis(phase->is_IterGVN());
    has_phis = (has_phi() != nullptr); // Cache result

    if (!has_phis) {            // No Phi users?  Nothing merging?
      for (uint i = 1; i < req()-1; i++) {
        Node *if1 = in(i);
        if( !if1 ) continue;
        Node *iff = if1->in(0);
        if( !iff || !iff->is_If() ) continue;
// ...

  if (iff1 == iff2) {
    igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
    igvn->replace_input_of(region, idx1, iff1->in(0));
    igvn->replace_input_of(region, idx2, igvn->C->top());
    return (region == this); // Remove useless if (both projections map to the same control/value)
  }
  BoolNode* bol1 = iff1->in(1)->isa_Bool();
  BoolNode* bol2 = iff2->in(1)->isa_Bool();
  if (bol1 == nullptr || bol2 == nullptr) {
    return false; // No bool inputs found
  }
  Node* cmp1 = bol1->in(1);
  Node* cmp2 = bol2->in(1);
  bool commute = false;
  if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
    return false; // No comparison
  } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
             cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
             cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
             cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
             cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck() ||
             cmp1->is_FlatArrayCheck() || cmp2->is_FlatArrayCheck()) {
    // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
    // SubTypeCheck and FlatArrayCheck are not commutative.
    return false;
  } else if (cmp1 != cmp2) {
    if (cmp1->in(1) == cmp2->in(2) &&
        cmp1->in(2) == cmp2->in(1)) {
      commute = true; // Same but swapped inputs, commute the test
    } else {
      return false; // Ifs are not comparing the same values
    }
  }
  proj1 = proj1->other_if_proj();
  proj2 = proj2->other_if_proj();
  if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
         proj2->unique_ctrl_out_or_null() == this) ||
        (proj2->unique_ctrl_out_or_null() == iff1 &&
         proj1->unique_ctrl_out_or_null() == this))) {
    return false; // Ifs are not connected through other projs
  }
  // Found an 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged
// ...

    st->print("#reducible ");
    break;
  case RegionNode::LoopStatus::NeverIrreducibleEntry:
    break; // nothing
  }
}
#endif

// Find the one non-null required input.  RegionNode only
Node *Node::nonnull_req() const {
  assert( is_Region(), "" );
  for( uint i = 1; i < _cnt; i++ )
    if( in(i) )
      return in(i);
  ShouldNotReachHere();
  return nullptr;
}


//=============================================================================
// note that these functions assume that the _adr_type field is flat
uint PhiNode::hash() const {
  const Type* at = _adr_type;
  return TypeNode::hash() + (at ? at->hash() : 0);
}
bool PhiNode::cmp( const Node &n ) const {
  return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
}
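// Normalize an address type to the canonical adr_type of its alias class, so
// that two memory phis over the same slice hash and compare as equal.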
static inline
const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
  if (at == nullptr || at == TypePtr::BOTTOM) return at;
  return Compile::current()->alias_type(at)->adr_type();
}

//----------------------------make---------------------------------------------
// create a new phi with edges matching r and set (initially) to x
PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
  uint preds = r->req();   // Number of predecessor paths
  assert(t != Type::MEMORY || at == flatten_phi_adr_type(at) ||
         (flatten_phi_adr_type(at) == TypeAryPtr::INLINES &&
          Compile::current()->flat_accesses_share_alias()), "flatten at");
  PhiNode* p = new PhiNode(r, t, at);
  for (uint j = 1; j < preds; j++) {
    // Fill in all inputs, except those which the region does not yet have
    if (r->in(j) != nullptr)
      p->init_req(j, x);
  }
  return p;
}
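// Convenience overload: infer the phi's type (and, for memory phis, the
// address type) from the sample input x.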
PhiNode* PhiNode::make(Node* r, Node* x) {
  const Type* t = x->bottom_type();
  const TypePtr* at = nullptr;
  if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
  return make(r, x, t, at);
}
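// Like make(), but leave all data inputs null; the caller fills them in later.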
PhiNode* PhiNode::make_blank(Node* r, Node* x) {
  const Type* t = x->bottom_type();
  const TypePtr* at = nullptr;
  if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
  return new PhiNode(r, t, at);
}
// ...

      np->as_Phi()->verify_adr_type(visited, at);
    } else if (n->bottom_type() == Type::TOP
               || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
      // ignore top inputs
    } else {
      const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
      // recheck phi/non-phi consistency at leaves:
      assert((nat != nullptr) == (at != nullptr), "");
      assert(nat == at || nat == TypePtr::BOTTOM,
             "adr_type must be consistent at leaves of phi nest");
    }
  }
}

// Verify a whole nest of phis rooted at this one.
void PhiNode::verify_adr_type(bool recursive) const {
  if (VMError::is_error_reported()) return; // muzzle asserts when debugging an error
  if (Node::in_dump()) return;              // muzzle asserts when printing

  assert((_type == Type::MEMORY) == (_adr_type != nullptr), "adr_type for memory phis only");
  // Flat array elements shouldn't get their own memory slice until flat_accesses_share_alias is cleared.
  // It could be that the graph has no loads/stores and flat_accesses_share_alias is never cleared. EA could
  // still create per-element Phis, but that wouldn't be a problem as there are no memory accesses for that array.
  assert(_adr_type == nullptr || _adr_type->isa_aryptr() == nullptr ||
         _adr_type->is_aryptr()->is_known_instance() ||
         !_adr_type->is_aryptr()->is_flat() ||
         !Compile::current()->flat_accesses_share_alias() ||
         _adr_type == TypeAryPtr::INLINES, "flat array element shouldn't get its own slice yet");

  if (!VerifyAliases) return; // verify thoroughly only if requested

  assert(_adr_type == flatten_phi_adr_type(_adr_type),
         "Phi::adr_type must be pre-normalized");

  if (recursive) {
    VectorSet visited;
    verify_adr_type(visited, _adr_type);
  }
}
#endif


//------------------------------Value------------------------------------------
// Compute the type of the PhiNode
const Type* PhiNode::Value(PhaseGVN* phase) const {
  Node *r = in(0);              // RegionNode
  if( !r )                      // Copy or dead
    return in(1) ? phase->type(in(1)) : Type::TOP;
// ...

  assert(is_diamond_phi() > 0, "sanity");
  assert(req() == 3, "same as region");
  const Node* region = in(0);
  for (uint i = 1; i < 3; i++) {
    Node* phi_input = in(i);
    if (phi_input != nullptr && phi_input->is_MergeMem() && region->in(i)->outcnt() == 1) {
      // Nothing is control-dependent on path #i except the region itself.
      MergeMemNode* merge_mem = phi_input->as_MergeMem();
      uint j = 3 - i;
      Node* other_phi_input = in(j);
      if (other_phi_input != nullptr && other_phi_input == merge_mem->base_memory()) {
        // merge_mem is a successor memory to other_phi_input, and is not pinned inside the diamond, so push it out.
        // This will allow the diamond to collapse completely if there are no other phis left.
        igvn->replace_node(this, merge_mem);
        return true;
      }
    }
  }
  return false;
}

//----------------------------check_cmove_id-----------------------------------
// Check for CMove'ing a constant after comparing against the constant.
// Happens all the time now, since if we compare equality vs a constant in
// the parser, we "know" the variable is constant on one path and we force
// it.  Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
// conditional move: "x = (x==0)?0:x;".  Yucko.  This fix is slightly more
// general in that we don't need constants.  Since CMoves are only inserted
// in very special circumstances, we do it here on generic Phis.
Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) {
  assert(true_path != 0, "only diamond shape graph expected");

  // is_diamond_phi() has guaranteed the correctness of the node sequence:
  // phi->region->if_proj->ifnode->bool->cmp
  Node* region = in(0);
  Node* iff = region->in(1)->in(0);
  BoolNode* b = iff->in(1)->as_Bool();
  Node* cmp = b->in(1);
  Node* tval = in(true_path);
  Node* fval = in(3-true_path);
  Node* id = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b);
// ...

    if (rc->in(0)->in(1) == nullptr || !rc->in(0)->in(1)->is_Bool()) { continue; }
    if (worklist.member(rc->in(0)->in(1))) {
      delay = true;
      break;
    }

    if (rc->in(0)->in(1)->in(1) == nullptr || !rc->in(0)->in(1)->in(1)->is_Cmp()) { continue; }
    if (worklist.member(rc->in(0)->in(1)->in(1))) {
      delay = true;
      break;
    }
  }

  if (delay) {
    worklist.push(this);
  }
  return delay;
}

// Push inline type input nodes (and null) down through the phi recursively (can handle data loops).
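// For example, Phi(InlineType1, null, InlineType2) becomes a single
// InlineTypeNode whose oop and field inputs are phis that merge the
// corresponding inputs of InlineType1, a null value, and InlineType2.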
InlineTypeNode* PhiNode::push_inline_types_down(PhaseGVN* phase, bool can_reshape, ciInlineKlass* inline_klass) {
  assert(inline_klass != nullptr, "must be");
  InlineTypeNode* vt = InlineTypeNode::make_null(*phase, inline_klass, /* transform = */ false)->clone_with_phis(phase, in(0), nullptr, !_type->maybe_null());
  if (can_reshape) {
    // Replace phi right away to be able to use the inline
    // type node when reaching the phi again through data loops.
    PhaseIterGVN* igvn = phase->is_IterGVN();
    for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
      Node* u = fast_out(i);
      igvn->rehash_node_delayed(u);
      imax -= u->replace_edge(this, vt);
      --i;
    }
    igvn->rehash_node_delayed(this);
    assert(outcnt() == 0, "should be dead now");
  }
  ResourceMark rm;
  Node_List casts;
  for (uint i = 1; i < req(); ++i) {
    Node* n = in(i);
    while (n->is_ConstraintCast()) {
      casts.push(n);
      n = n->in(1);
    }
    if (phase->type(n)->is_zero_type()) {
      n = InlineTypeNode::make_null(*phase, inline_klass);
    } else if (n->is_Phi()) {
      assert(can_reshape, "can only handle phis during IGVN");
      n = phase->transform(n->as_Phi()->push_inline_types_down(phase, can_reshape, inline_klass));
    }
    while (casts.size() != 0) {
      // Push the cast(s) through the InlineTypeNode
      // TODO 8302217 Can we avoid cloning? See InlineTypeNode::clone_if_required
      Node* cast = casts.pop()->clone();
      cast->set_req_X(1, n->as_InlineType()->get_oop(), phase);
      n = n->clone();
      n->as_InlineType()->set_oop(*phase, phase->transform(cast));
      n = phase->transform(n);
    }
    bool transform = !can_reshape && (i == (req()-1)); // Transform phis on last merge
    vt->merge_with(phase, n->as_InlineType(), i, transform);
  }
  return vt;
}

// If the Phi's Region is in an irreducible loop, and the Region
// has had an input removed, but not yet transformed, it could be
// that the Region (and this Phi) are not reachable from Root.
// If we allow the Phi to collapse before the Region, this may lead
// to dead-loop data.  Wait for the Region to check for reachability,
// and potentially remove the dead code.
bool PhiNode::must_wait_for_region_in_irreducible_loop(PhaseGVN* phase) const {
  RegionNode* region = in(0)->as_Region();
  if (region->loop_status() == RegionNode::LoopStatus::MaybeIrreducibleEntry) {
    Node* top = phase->C->top();
    for (uint j = 1; j < req(); j++) {
      Node* rc = region->in(j); // for each control input
      if (rc == nullptr || phase->type(rc) == Type::TOP) {
        // Region is missing a control input
        Node* n = in(j);
        if (n != nullptr && n != top) {
          // Phi still has its input, so region just lost its input
          return true;
        }
      }
// ...

          for (uint i = 1; i < req(); i++) {
            offset->init_req(i, in(i)->in(AddPNode::Offset));
          }
          phase->is_IterGVN()->register_new_node_with_optimizer(offset);
        }
        return new AddPNode(base, address, offset);
      }
    }
  }

  // Split phis through memory merges, so that the memory merges will go away.
  // Piggy-back this transformation on the search for a unique input....
  // It will be as if the merged memory is the unique value of the phi.
  // (Do not attempt this optimization unless parsing is complete.
  // It would make the parser's memory-merge logic sick.)
  // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
  if (progress == nullptr && can_reshape && type() == Type::MEMORY) {
    // see if this phi should be sliced
    uint merge_width = 0;
    bool saw_self = false;
    // TODO revisit this with JDK-8247216
    bool mergemem_only = true;
    for( uint i=1; i<req(); ++i ) {// For all paths in
      Node *ii = in(i);
      // TOP inputs should not be counted as safe inputs because if the
      // Phi references itself through all other inputs then splitting the
      // Phi through memory merges would create a dead loop at a later stage.
      if (ii == top) {
        return nullptr; // Delay optimization until graph is cleaned.
      }
      if (ii->is_MergeMem()) {
        MergeMemNode* n = ii->as_MergeMem();
        merge_width = MAX2(merge_width, n->req());
        saw_self = saw_self || (n->base_memory() == this);
      } else {
        mergemem_only = false;
      }
    }

    // This restriction is temporarily necessary to ensure termination:
    if (!mergemem_only && !saw_self && adr_type() == TypePtr::BOTTOM) merge_width = 0;

    if (merge_width > Compile::AliasIdxRaw) {
      // found at least one non-empty MergeMem
      const TypePtr* at = adr_type();
      if (at != TypePtr::BOTTOM) {
        // Patch the existing phi to select an input from the merge:
        // Phi:AT1(...MergeMem(m0, m1, m2)...) into
        //     Phi:AT1(...m1...)
        int alias_idx = phase->C->get_alias_index(at);
        for (uint i=1; i<req(); ++i) {
          Node *ii = in(i);
          if (ii->is_MergeMem()) {
            MergeMemNode* n = ii->as_MergeMem();
            // compress paths and change unreachable cycles to TOP
            // If not, we can update the input infinitely along a MergeMem cycle
            // Equivalent code is in MemNode::Ideal_common
            Node *m = phase->transform(n);
            if (outcnt() == 0) {  // Above transform() may kill us!
              return top;
            }
      // ...

      if (!saw_safe_input) {
        // There is a dead loop: All inputs are either dead or reference back to this phi
        return top;
      }

      // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
      //     MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
      PhaseIterGVN* igvn = phase->is_IterGVN();
      assert(igvn != nullptr, "sanity check");
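      // The temporary hook keeps the freshly created phis alive until they
      // are wired into the MergeMem result below.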
      Node* hook = new Node(1);
      PhiNode* new_base = (PhiNode*) clone();
      // Must eagerly register phis, since they participate in loops.
      igvn->register_new_node_with_optimizer(new_base);
      hook->add_req(new_base);

      MergeMemNode* result = MergeMemNode::make(new_base);
      for (uint i = 1; i < req(); ++i) {
        Node *ii = in(i);
        if (ii->is_MergeMem()) {
          MergeMemNode* n = ii->as_MergeMem();
          if (igvn) {
            // TODO revisit this with JDK-8247216
            // Put 'n' on the worklist because it might be modified by MergeMemStream::iteration_setup
            igvn->_worklist.push(n);
          }
          for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
            // If we have not seen this slice yet, make a phi for it.
            bool made_new_phi = false;
            if (mms.is_empty()) {
              Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
              made_new_phi = true;
              igvn->register_new_node_with_optimizer(new_phi);
              hook->add_req(new_phi);
              mms.set_memory(new_phi);
            }
            Node* phi = mms.memory();
            assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
            phi->set_req(i, mms.memory2());
          }
        }
      }
      // Distribute all self-loops.
      { // (Extra braces to hide mms.)
        for (MergeMemStream mms(result); mms.next_non_empty(); ) {
          Node* phi = mms.memory();
            // ...

            if (is_decodeN) {
              new_ii = new EncodePNode(ii, narrow_t);
            } else {
              new_ii = new EncodePKlassNode(ii, narrow_t);
            }
            igvn->register_new_node_with_optimizer(new_ii);
          }
        }
        new_phi->set_req(i, new_ii);
      }
      igvn->register_new_node_with_optimizer(new_phi, this);
      if (is_decodeN) {
        progress = new DecodeNNode(new_phi, bottom_type());
      } else {
        progress = new DecodeNKlassNode(new_phi, bottom_type());
      }
    }
  }
#endif

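  // If every input of this Phi is an inline type (or null), try to replace
  // the Phi itself by an InlineTypeNode with per-field phis so that loads
  // from it can fold.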
  Node* inline_type = try_push_inline_types_down(phase, can_reshape);
  if (inline_type != this) {
    return inline_type;
  }

  // Try to convert a Phi with duplicated convert nodes into a phi of the pre-conversion type and a single
  // convert node following the phi, to de-duplicate the convert nodes and compact the IR.
  if (can_reshape && progress == nullptr) {
    ConvertNode* convert = in(1)->isa_Convert();
    if (convert != nullptr) {
      int conv_op = convert->Opcode();
      bool ok = true;

      // Check the rest of the inputs
      for (uint i = 2; i < req(); i++) {
        // Make sure that all inputs are of the same type of convert node
        if (in(i)->Opcode() != conv_op) {
          ok = false;
          break;
        }
      }

      if (ok) {
        // Find the local bottom type to set as the type of the phi
        const Type* source_type = Type::get_const_basic_type(convert->in_type()->basic_type());
        // ...

        // Set the inputs of the new phi to be the inputs of the converts
        for (uint i = 1; i < req(); i++) {
          newphi->init_req(i, in(i)->in(1));
        }

        phase->is_IterGVN()->register_new_node_with_optimizer(newphi, this);

        return ConvertNode::create_convert(get_convert_type(convert, source_type), get_convert_type(convert, dest_type), newphi);
      }
    }
  }

  // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
  if (EnableVectorReboxing && can_reshape && progress == nullptr && type()->isa_oopptr()) {
    progress = merge_through_phi(this, phase->is_IterGVN());
  }

  return progress; // Return any progress
}

// Check recursively if inputs are either an inline type, constant null
// or another Phi (including self references through data loops). If so,
// push the inline types down through the phis to enable folding of loads.
Node* PhiNode::try_push_inline_types_down(PhaseGVN* phase, const bool can_reshape) {
  if (!can_be_inline_type()) {
    return this;
  }

  ciInlineKlass* inline_klass;
  if (can_push_inline_types_down(phase, can_reshape, inline_klass)) {
    assert(inline_klass != nullptr, "must be");
    return push_inline_types_down(phase, can_reshape, inline_klass);
  }
  return this;
}

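// Conservatively determine whether every (transitive) input of this Phi is an
// InlineTypeNode of one common klass, a null constant, or another Phi. On
// success, inline_klass is set to that common klass.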
bool PhiNode::can_push_inline_types_down(PhaseGVN* phase, const bool can_reshape, ciInlineKlass*& inline_klass) {
  if (req() <= 2) {
    // Dead phi.
    return false;
  }
  inline_klass = nullptr;

  // TODO 8302217 We need to prevent endless pushing through
  bool only_phi = (outcnt() != 0);
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* n = fast_out(i);
    if (n->is_InlineType() && n->in(1) == this) {
      return false;
    }
    if (!n->is_Phi()) {
      only_phi = false;
    }
  }
  if (only_phi) {
    return false;
  }

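  // Breadth-first walk over the whole phi nest. ConstraintCasts are peeled
  // off each input and remembered so we can check below that they would
  // survive being pushed through.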
  ResourceMark rm;
  Unique_Node_List worklist;
  worklist.push(this);
  Node_List casts;

  for (uint next = 0; next < worklist.size(); next++) {
    Node* phi = worklist.at(next);
    for (uint i = 1; i < phi->req(); i++) {
      Node* n = phi->in(i);
      if (n == nullptr) {
        return false;
      }
      while (n->is_ConstraintCast()) {
        if (n->in(0) != nullptr && n->in(0)->is_top()) {
          // Will die, don't optimize
          return false;
        }
        casts.push(n);
        n = n->in(1);
      }
      const Type* type = phase->type(n);
      if (n->is_InlineType() && (inline_klass == nullptr || inline_klass == type->inline_klass())) {
        inline_klass = type->inline_klass();
      } else if (n->is_Phi() && can_reshape && n->bottom_type()->isa_ptr()) {
        worklist.push(n);
      } else if (!type->is_zero_type()) {
        return false;
      }
    }
  }
  if (inline_klass == nullptr) {
    return false;
  }

  // Check if cast nodes can be pushed through
  const Type* t = Type::get_const_type(inline_klass);
  while (casts.size() != 0 && t != nullptr) {
    Node* cast = casts.pop();
    if (t->filter(cast->bottom_type()) == Type::TOP) {
      return false;
    }
  }

  return true;
}

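// Debug-only wrapper: performs the same check with can_reshape assumed true.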
#ifdef ASSERT
bool PhiNode::can_push_inline_types_down(PhaseGVN* phase) {
  if (!can_be_inline_type()) {
    return false;
  }

  ciInlineKlass* inline_klass;
  return can_push_inline_types_down(phase, true, inline_klass);
}
#endif // ASSERT

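// Types are interned, so pointer identity yields an arbitrary but stable
// order, which is all the canonical sorting below requires.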
static int compare_types(const Type* const& e1, const Type* const& e2) {
  return (intptr_t)e1 - (intptr_t)e2;
}

// Collect the types at casts that are going to be eliminated at this Phi and store them in a TypeTuple.
// Sort the types using an arbitrary order so that a given set of types always hashes to the same TypeTuple (and
// TypeTuple pointer comparison is enough to tell whether two lists of types are the same or not).
const TypeTuple* PhiNode::collect_types(PhaseGVN* phase) const {
  const Node* region = in(0);
  const Type* phi_type = bottom_type();
  ResourceMark rm;
  GrowableArray<const Type*> types;
  for (uint i = 1; i < req(); i++) {
    if (region->in(i) == nullptr || phase->type(region->in(i)) == Type::TOP) {
      continue;
    }
    Node* in = Node::in(i);
    const Type* t = phase->type(in);
    if (in == nullptr || in == this || t == Type::TOP) {
      continue;
// ...

#ifndef PRODUCT
void CatchProjNode::dump_spec(outputStream *st) const {
  ProjNode::dump_spec(st);
  st->print("@bci %d ", _handler_bci);
}
#endif

//=============================================================================
//------------------------------Identity---------------------------------------
// Check for CreateEx being Identity.
Node* CreateExNode::Identity(PhaseGVN* phase) {
  if( phase->type(in(1)) == Type::TOP ) return in(1);
  if( phase->type(in(0)) == Type::TOP ) return in(0);
  if (phase->type(in(0)->in(0)) == Type::TOP) {
    assert(in(0)->is_CatchProj(), "control is CatchProj");
    return phase->C->top(); // dead code
  }
  // We only come from CatchProj, unless the CatchProj goes away.
  // If the CatchProj is optimized away, then we just carry the
  // exception oop through.

  // CheckCastPPNode::Ideal() for inline types reuses the exception
  // paths of a call to perform an allocation: we can see a Phi here.
  if (in(1)->is_Phi()) {
    return this;
  }
  CallNode *call = in(1)->in(0)->as_Call();

  return (in(0)->is_CatchProj() && in(0)->in(0)->is_Catch() &&
          in(0)->in(0)->in(1) == in(1)) ? this : call->in(TypeFunc::Parms);
}

//=============================================================================
//------------------------------Value------------------------------------------
// Check for being unreachable.
const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
  if (!in(0) || in(0)->is_top()) return Type::TOP;
  return bottom_type();
}

//------------------------------Ideal------------------------------------------
// Check for no longer being part of a loop
Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (can_reshape && !in(0)->is_Region()) {
    // Dead code elimination can sometimes delete this projection so
    // if it's not there, there's nothing to do.
    // ...