  return longcon(l);
}


//------------------------------zerocon-----------------------------------------
// Fast zero or null constant. Same as "transform(ConNode::make(Type::get_zero_type(bt)))"
ConNode* PhaseValues::zerocon(BasicType bt) {
  assert((uint)bt <= _zcon_max, "domain check");
  ConNode* zcon = _zcons[bt];
  if (zcon != nullptr && zcon->in(TypeFunc::Control) != nullptr)
    return zcon;
  zcon = (ConNode*) uncached_makecon(Type::get_zero_type(bt));
  _zcons[bt] = zcon;
  return zcon;
}
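// Note on the cache check above: constant nodes are pinned on the root via
// their control input, and killing a node clears its inputs. Checking
// in(TypeFunc::Control) therefore appears to guard against handing out a
// cached constant that has since gone dead; in that case a fresh node is
// made and re-cached. (A hedged reading of the code, not an authoritative
// comment from the source.)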



//=============================================================================
Node* PhaseGVN::apply_ideal(Node* k, bool can_reshape) {
  Node* i = BarrierSet::barrier_set()->barrier_set_c2()->ideal_node(this, k, can_reshape);
  if (i == nullptr) {
    i = k->Ideal(this, can_reshape);
  }
  return i;
}
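// The GC barrier set gets the first chance to idealize the node (for
// collectors whose barriers are expanded to C2 IR); only if it declines,
// returning null, does the node's own Ideal() run.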

//------------------------------transform--------------------------------------
// Return a node which computes the same function as this node, but
// in a faster or cheaper fashion.
Node* PhaseGVN::transform(Node* n) {
  NOT_PRODUCT( set_transforms(); )

  // Apply the Ideal call in a loop until it no longer applies
  Node* k = n;
  Node* i = apply_ideal(k, /*can_reshape=*/false);
  NOT_PRODUCT(uint loop_count = 1;)
  while (i != nullptr) {
    assert(i->_idx >= k->_idx, "Idealize should return new nodes, use Identity to return old nodes");
    k = i;
#ifdef ASSERT
    if (loop_count >= K + C->live_nodes()) {
      dump_infinite_loop_info(i, "PhaseGVN::transform");
    }
#endif
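    // Each Ideal step must return a node with a higher _idx (i.e. a newly
    // made node), so a loop_count exceeding K plus the live node count can
    // only mean Ideal keeps "making progress" forever; dump diagnostics for
    // the suspected infinite loop. (loop_count is maintained in the elided
    // remainder of this loop.)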

// ... [snip: unrelated code elided] ...

              stack.push(in, PROCESS_INPUTS); // Recursively remove
              recurse = true;
            } else if (in->outcnt() == 1 &&
                       in->has_special_unique_user()) {
              _worklist.push(in->unique_out());
            } else if (in->outcnt() <= 2 && dead->is_Phi()) {
              if (in->Opcode() == Op_Region) {
                _worklist.push(in);
              } else if (in->is_Store()) {
                DUIterator_Fast imax, i = in->fast_outs(imax);
                _worklist.push(in->fast_out(i));
                i++;
                if (in->outcnt() == 2) {
                  _worklist.push(in->fast_out(i));
                  i++;
                }
                assert(!(i < imax), "sanity");
              }
            } else if (dead->is_data_proj_of_pure_function(in)) {
              _worklist.push(in);
            } else {
              BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(this, in);
            }
            if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
                in->is_Proj() && in->in(0) != nullptr && in->in(0)->is_Initialize()) {
              // A Load that directly follows an InitializeNode is
              // going away. The Stores that follow are candidates
              // again to be captured by the InitializeNode.
              for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) {
                Node* n = in->fast_out(j);
                if (n->is_Store()) {
                  _worklist.push(n);
                }
              }
            }
          } // if (in != nullptr && in != C->top())
        } // for (uint i = 0; i < dead->req(); i++)
        if (recurse) {
          continue;
        }
      } // if (!dead->is_Con())
    } // if (progress_state == PROCESS_INPUTS)
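    // Sketch of the dead-node removal above: nodes are visited iteratively
    // via an explicit Node_Stack rather than by C++ recursion. When severing
    // an input edge makes that input itself dead (outcnt() == 0), it is
    // pushed in PROCESS_INPUTS state and 'continue' restarts the loop on it;
    // otherwise surviving neighbors whose Ideal() might now apply (unique
    // users, Regions and Stores feeding a dying Phi, pure-function calls)
    // are enqueued on the IGVN worklist.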

// ... [snip: unrelated code elided] ...

      if (init != nullptr) {
        init->for_each_proj(enqueue_init_mem_projs, TypeFunc::Memory);
      }
    }
    // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
    // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
    // to guarantee the change is not missed.
    if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
      Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
      if (p != nullptr) {
        add_users_to_worklist0(p, worklist);
      }
    }

    if (use_op == Op_Initialize) {
      InitializeNode* init = use->as_Initialize();
      init->for_each_proj(enqueue_init_mem_projs, TypeFunc::Memory);
    }
    // Loading the java mirror from a Klass requires two loads and the type
    // of the mirror load depends on the type of 'n'. See LoadNode::Value().
    // LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    bool has_load_barrier_nodes = bs->has_load_barrier_nodes();

    if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        const Type* ut = u->bottom_type();
        if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
          if (has_load_barrier_nodes) {
            // Search for load barriers behind the load
            for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
              Node* b = u->fast_out(i3);
              if (bs->is_gc_barrier_node(b)) {
                worklist.push(b);
              }
            }
          }
          worklist.push(u);
        }
      }
    }
    if (use->Opcode() == Op_OpaqueZeroTripGuard) {
      assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
      if (use->outcnt() == 1) {
        Node* cmp = use->unique_out();
        worklist.push(cmp);
      }
    }
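    // OpaqueZeroTripGuard appears to hide a counted loop's zero-trip-guard
    // condition from the optimizer until loop opts are done; when its input
    // changes, the Cmp it feeds must be enqueued explicitly because the
    // opaque node blocks normal propagation. (A hedged reading based on the
    // node's use here.)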

    // From CastX2PNode::Ideal
    // CastX2P(AddX(x, y))
    // CastX2P(SubX(x, y))
    if (use->Opcode() == Op_AddX || use->Opcode() == Op_SubX) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        if (u->Opcode() == Op_CastX2P) {
          worklist.push(u);

// ... [snip: unrelated code elided] ...

      push_if_not_bottom_type(worklist, bol);
    }
  }
}

// If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
// See PhiNode::Value().
void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
  uint use_op = use->Opcode();
  if (use_op == Op_CmpI || use_op == Op_CmpL) {
    PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
    if (phi != nullptr) {
      worklist.push(phi);
    }
  }
}
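// The pattern matched above (roughly): the CmpI/CmpL feeds the exit test of
// a counted loop whose Phi is the induction variable; that Phi's type is
// computed from the loop bounds, so a sharper bound type must re-trigger
// PhiNode::Value() on the Phi. countedloop_phi_from_cmp() recovers the Phi
// from the comparison.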

// Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
// See LoadNode::Value().
void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
  BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
  bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();

  if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
      Node* loadp = use->fast_out(i);
      const Type* ut = loadp->bottom_type();
      if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
        if (has_load_barrier_nodes) {
          // Search for load barriers behind the load
          push_load_barrier(worklist, barrier_set, loadp);
        }
        worklist.push(loadp);
      }
    }
  }
}

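// Enqueue every direct use of 'use' that the barrier set classifies as a GC
// barrier node, so the barrier's type is recomputed along with the load it
// protects.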
void PhaseCCP::push_load_barrier(Unique_Node_List& worklist, const BarrierSetC2* barrier_set, const Node* use) {
  for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
    Node* barrier_node = use->fast_out(i);
    if (barrier_set->is_gc_barrier_node(barrier_node)) {
      worklist.push(barrier_node);
    }
  }
}

// AndI/L::Value() optimizes patterns such as (v << 2) & 3, or CON & 3, to zero if the operands are bitwise
// disjoint. Add the AndI/L nodes back to the worklist to re-apply Value() in case the value has become constant
// or the shift value has changed.
void PhaseCCP::push_and(Unique_Node_List& worklist, const Node* parent, const Node* use) const {
  const TypeInteger* parent_type = type(parent)->isa_integer(type(parent)->basic_type());
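  // Worked micro-example: (v << 2) always has its two low bits zero, so
  // (v << 2) & 3 folds to the constant 0 once the shift amount is known to
  // be 2. A sharper type on the shift input can therefore turn the And into
  // a constant even though the And's own inputs did not change identity.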

// ... [snip: unrelated code elided] ...

      break;
    case 1:
      if (old->is_Store() || old->has_special_unique_user())
        igvn->add_users_to_worklist(old);
      break;
    case 2:
      if (old->is_Store())
        igvn->add_users_to_worklist(old);
      if (old->Opcode() == Op_Region)
        igvn->_worklist.push(old);
      break;
    case 3:
      if (old->Opcode() == Op_Region) {
        igvn->_worklist.push(old);
        igvn->add_users_to_worklist(old);
      }
      break;
    default:
      break;
    }
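    // The outcnt() cases above look to be tuned to the patterns IGVN can
    // newly exploit once 'old' loses a user: a Store or a node with a
    // special unique user becomes interesting at one remaining use, a
    // Region at two or three (its Phis may now collapse). Higher out-counts
    // offer no such guarantee, so nothing is enqueued. (A hedged reading.)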

    BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(igvn, old);
  }
}

void Node::set_req_X(uint i, Node* n, PhaseGVN* gvn) {
  PhaseIterGVN* igvn = gvn->is_IterGVN();
  if (igvn == nullptr) {
    set_req(i, n);
    return;
  }
  set_req_X(i, n, igvn);
}
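// This PhaseGVN overload just dispatches: when the phase is really an IGVN,
// the IGVN-specific overload (whose tail is shown above) runs so that 'old'
// and its users get re-enqueued; otherwise a bare set_req() suffices, since
// plain GVN maintains no worklist.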

//-------------------------------replace_by-----------------------------------
// Using def-use info, replace one node for another. Follow the def-use info
// to all users of the OLD node. Then make all uses point to the NEW node.
void Node::replace_by(Node* new_node) {
  assert(!is_top(), "top node has no DU info");
  for (DUIterator_Last imin, i = last_outs(imin); i >= imin; ) {
    Node* use = last_out(i);
    uint uses_found = 0;