  return longcon(l);
}


//------------------------------zerocon-----------------------------------------
// Fast zero or null constant. Same as "transform(ConNode::make(Type::get_zero_type(bt)))"
ConNode* PhaseValues::zerocon(BasicType bt) {
  assert((uint)bt <= _zcon_max, "domain check");
  ConNode* zcon = _zcons[bt];
  if (zcon != nullptr && zcon->in(TypeFunc::Control) != nullptr)
    return zcon;
  zcon = (ConNode*) uncached_makecon(Type::get_zero_type(bt));
  _zcons[bt] = zcon;
  return zcon;
}
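// _zcons caches one shared zero/null constant per BasicType. The
// in(TypeFunc::Control) check guards against a cached constant that has
// since been disconnected (a dead node has its input edges cut to null),
// in which case a fresh constant is made and re-cached.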


//=============================================================================
Node* PhaseGVN::apply_ideal(Node* k, bool can_reshape) {
  Node* i = BarrierSet::barrier_set()->barrier_set_c2()->ideal_node(this, k, can_reshape);
  if (i == nullptr) {
    i = k->Ideal(this, can_reshape);
  }
  return i;
}
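// The GC barrier set gets the first chance to idealize 'k'; only if it
// declines (returns nullptr) is the node's own Ideal() consulted.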

//------------------------------transform--------------------------------------
// Return a node which computes the same function as this node, but
// in a faster or cheaper fashion.
Node* PhaseGVN::transform(Node* n) {
  NOT_PRODUCT( set_transforms(); )

  // Apply the Ideal call in a loop until it no longer applies
  Node* k = n;
  Node* i = apply_ideal(k, /*can_reshape=*/false);
  NOT_PRODUCT(uint loop_count = 1;)
  while (i != nullptr) {
    assert(i->_idx >= k->_idx, "Idealize should return new nodes, use Identity to return old nodes");
    k = i;
#ifdef ASSERT
    if (loop_count >= K + C->live_nodes()) {
      dump_infinite_loop_info(i, "PhaseGVN::transform");
    }
#endif
// ...
              stack.push(in, PROCESS_INPUTS); // Recursively remove
              recurse = true;
            } else if (in->outcnt() == 1 &&
                       in->has_special_unique_user()) {
              _worklist.push(in->unique_out());
            } else if (in->outcnt() <= 2 && dead->is_Phi()) {
              if (in->Opcode() == Op_Region) {
                _worklist.push(in);
              } else if (in->is_Store()) {
                DUIterator_Fast imax, i = in->fast_outs(imax);
                _worklist.push(in->fast_out(i));
                i++;
                if (in->outcnt() == 2) {
                  _worklist.push(in->fast_out(i));
                  i++;
                }
                assert(!(i < imax), "sanity");
              }
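              // When a Phi dies, a Region or Store input left with at most
              // two remaining uses may become optimizable again, hence the
              // re-enqueueing above.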
            } else if (dead->is_data_proj_of_pure_function(in)) {
              _worklist.push(in);
            } else {
              BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(this, in);
            }
            if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
                in->is_Proj() && in->in(0) != nullptr && in->in(0)->is_Initialize()) {
              // A Load that directly follows an InitializeNode is
              // going away. The Stores that follow are candidates
              // again to be captured by the InitializeNode.
              for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) {
                Node* n = in->fast_out(j);
                if (n->is_Store()) {
                  _worklist.push(n);
                }
              }
            }
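            // The re-enqueued Stores are then re-examined by StoreNode::Ideal(),
            // which may fold them back into the initialization via
            // InitializeNode::capture_store().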
          } // if (in != nullptr && in != C->top())
        } // for (uint i = 0; i < dead->req(); i++)
        if (recurse) {
          continue;
        }
      } // if (!dead->is_Con())
    } // if (progress_state == PROCESS_INPUTS)
// ...
      if (init != nullptr) {
        init->for_each_proj(enqueue_init_mem_projs, TypeFunc::Memory);
      }
    }
    // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
    // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
    // to guarantee the change is not missed.
    if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
      Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
      if (p != nullptr) {
        add_users_to_worklist0(p, worklist);
      }
    }
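    // The control proj of a call such as AllocateArray feeds a Catch node;
    // pushing the proj's users re-enqueues that Catch so CatchNode::Value()
    // gets re-run.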

    if (use_op == Op_Initialize) {
      InitializeNode* init = use->as_Initialize();
      init->for_each_proj(enqueue_init_mem_projs, TypeFunc::Memory);
    }
    // Loading the java mirror from a Klass requires two loads and the type
    // of the mirror load depends on the type of 'n'. See LoadNode::Value().
    //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    bool has_load_barrier_nodes = bs->has_load_barrier_nodes();

    if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        const Type* ut = u->bottom_type();
        if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
          if (has_load_barrier_nodes) {
            // Search for load barriers behind the load
            for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
              Node* b = u->fast_out(i3);
              if (bs->is_gc_barrier_node(b)) {
                worklist.push(b);
              }
            }
          }
          worklist.push(u);
        }
      }
    }
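    // If the type of the raw Klass load ('use') improves, LoadNode::Value()
    // can compute a more precise type for the dependent mirror LoadP, so that
    // load (and any GC barrier behind it) is re-enqueued above.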
    if (use->Opcode() == Op_OpaqueZeroTripGuard) {
      assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
      if (use->outcnt() == 1) {
        Node* cmp = use->unique_out();
        worklist.push(cmp);
      }
    }
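    // The unique user of an OpaqueZeroTripGuard is the Cmp of a zero-trip-guard
    // check; re-enqueueing it lets the comparison fold once the type of the
    // guarded input improves.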

    // From CastX2PNode::Ideal
    // CastX2P(AddX(x, y))
    // CastX2P(SubX(x, y))
    if (use->Opcode() == Op_AddX || use->Opcode() == Op_SubX) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        if (u->Opcode() == Op_CastX2P) {
          worklist.push(u);
// ...
      push_if_not_bottom_type(worklist, bol);
    }
  }
}

// If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
// See PhiNode::Value().
void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
  uint use_op = use->Opcode();
  if (use_op == Op_CmpI || use_op == Op_CmpL) {
    PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
    if (phi != nullptr) {
      worklist.push(phi);
    }
  }
}
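// e.g. for the exit test CmpI(induction_phi, limit): when CCP narrows the
// type of 'limit' (the parent here), PhiNode::Value() may in turn narrow the
// induction Phi's range, so the Phi is re-enqueued.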

// Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
// See LoadNode::Value().
void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
  BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
  bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();

  if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
      Node* loadp = use->fast_out(i);
      const Type* ut = loadp->bottom_type();
      if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
        if (has_load_barrier_nodes) {
          // Search for load barriers behind the load
          push_load_barrier(worklist, barrier_set, loadp);
        }
        worklist.push(loadp);
      }
    }
  }
}
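// The 'ut != type(loadp)' test mirrors push_if_not_bottom_type(): once a
// node's CCP type has fallen to its bottom type it cannot change again, so
// such loads are not re-enqueued.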

void PhaseCCP::push_load_barrier(Unique_Node_List& worklist, const BarrierSetC2* barrier_set, const Node* use) {
  for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
    Node* barrier_node = use->fast_out(i);
    if (barrier_set->is_gc_barrier_node(barrier_node)) {
      worklist.push(barrier_node);
    }
  }
}

// AndI/L::Value() optimizes patterns such as (v << 2) & 3, or CON & 3, to zero if the operands are bitwise disjoint.
// Add the AndI/L nodes back to the worklist to re-apply Value() in case the value has become a constant or the shift
// value has changed.
void PhaseCCP::push_and(Unique_Node_List& worklist, const Node* parent, const Node* use) const {
  const TypeInteger* parent_type = type(parent)->isa_integer(type(parent)->basic_type());
// ...
      break;
    case 1:
      if (old->is_Store() || old->has_special_unique_user())
        igvn->add_users_to_worklist(old);
      break;
    case 2:
      if (old->is_Store())
        igvn->add_users_to_worklist(old);
      if (old->Opcode() == Op_Region)
        igvn->_worklist.push(old);
      break;
    case 3:
      if (old->Opcode() == Op_Region) {
        igvn->_worklist.push(old);
        igvn->add_users_to_worklist(old);
      }
      break;
    default:
      break;
    }

    BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(igvn, old);
  }
}
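// Rationale for the outcnt() switch above: when 'old' loses a use, a node
// with few remaining uses often becomes optimizable again; e.g. a Store with
// a single remaining user may now be removable, and a Region with two or
// three users may be collapsible, so the affected users are re-enqueued.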

void Node::set_req_X(uint i, Node *n, PhaseGVN *gvn) {
  PhaseIterGVN* igvn = gvn->is_IterGVN();
  if (igvn == nullptr) {
    set_req(i, n);
    return;
  }
  set_req_X(i, n, igvn);
}
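// Convenience overload: during plain GVN there is no IGVN worklist to
// maintain, so a bare set_req() suffices; under IterGVN the overload above
// also re-enqueues nodes affected by the edge change.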

//-------------------------------replace_by-----------------------------------
// Using def-use info, replace one node with another. Follow the def-use info
// to all users of the OLD node. Then make all uses point to the NEW node.
void Node::replace_by(Node* new_node) {
  assert(!is_top(), "top node has no DU info");
  for (DUIterator_Last imin, i = last_outs(imin); i >= imin; ) {
    Node* use = last_out(i);
    uint uses_found = 0;
// ...