  return longcon(l);
}

//------------------------------zerocon-----------------------------------------
// Fast zero or null constant. Same as "transform(ConNode::make(Type::get_zero_type(bt)))"
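// (For instance, zerocon(T_OBJECT) yields the cached null pointer constant.)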
ConNode* PhaseValues::zerocon(BasicType bt) {
  assert((uint)bt <= _zcon_max, "domain check");
  ConNode* zcon = _zcons[bt];
  if (zcon != nullptr && zcon->in(TypeFunc::Control) != nullptr)
    return zcon;
  zcon = (ConNode*) uncached_makecon(Type::get_zero_type(bt));
  _zcons[bt] = zcon;
  return zcon;
}


//=============================================================================
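// Give the GC's barrier set the first chance to idealize a node; if it has
// no transformation to apply, fall back to the node's own Ideal().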
Node* PhaseGVN::apply_ideal(Node* k, bool can_reshape) {
  Node* i = BarrierSet::barrier_set()->barrier_set_c2()->ideal_node(this, k, can_reshape);
  if (i == nullptr) {
    i = k->Ideal(this, can_reshape);
  }
  return i;
}

//------------------------------transform--------------------------------------
// Return a node which computes the same function as this node, but
// in a faster or cheaper fashion.
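// Ideal() is applied repeatedly until it reaches a fixed point; the ASSERT
// code below flags an Ideal() implementation that keeps returning new nodes
// without ever terminating.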
Node* PhaseGVN::transform(Node* n) {
  NOT_PRODUCT( set_transforms(); )

  // Apply the Ideal call in a loop until it no longer applies
  Node* k = n;
  Node* i = apply_ideal(k, /*can_reshape=*/false);
  NOT_PRODUCT(uint loop_count = 1;)
  while (i != nullptr) {
    assert(i->_idx >= k->_idx, "Idealize should return new nodes, use Identity to return old nodes");
    k = i;
#ifdef ASSERT
    if (loop_count >= K + C->live_nodes()) {
      dump_infinite_loop_info(i, "PhaseGVN::transform");
    }
#endif

// ...
              stack.push(in, PROCESS_INPUTS); // Recursively remove
              recurse = true;
            } else if (in->outcnt() == 1 &&
                       in->has_special_unique_user()) {
              _worklist.push(in->unique_out());
            } else if (in->outcnt() <= 2 && dead->is_Phi()) {
              if (in->Opcode() == Op_Region) {
                _worklist.push(in);
              } else if (in->is_Store()) {
                DUIterator_Fast imax, i = in->fast_outs(imax);
                _worklist.push(in->fast_out(i));
                i++;
                if (in->outcnt() == 2) {
                  _worklist.push(in->fast_out(i));
                  i++;
                }
                assert(!(i < imax), "sanity");
              }
            } else if (dead->is_data_proj_of_pure_function(in)) {
              _worklist.push(in);
            } else {
              BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(this, in);
            }
            if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
                in->is_Proj() && in->in(0) != nullptr && in->in(0)->is_Initialize()) {
              // A Load that directly follows an InitializeNode is
              // going away. The Stores that follow are candidates
              // again to be captured by the InitializeNode.
              for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) {
                Node *n = in->fast_out(j);
                if (n->is_Store()) {
                  _worklist.push(n);
                }
              }
            }
          } // if (in != nullptr && in != C->top())
        } // for (uint i = 0; i < dead->req(); i++)
        if (recurse) {
          continue;
        }
      } // if (!dead->is_Con())
    } // if (progress_state == PROCESS_INPUTS)

// ...
      Node* imem = init->proj_out_or_null(TypeFunc::Memory);
      if (imem != nullptr) add_users_to_worklist0(imem, worklist);
    }
  }
  // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
  // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
  // to guarantee the change is not missed.
  if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
    Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
    if (p != nullptr) {
      add_users_to_worklist0(p, worklist);
    }
  }

  if (use_op == Op_Initialize) {
    Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
    if (imem != nullptr) add_users_to_worklist0(imem, worklist);
  }
  // Loading the java mirror from a Klass requires two loads and the type
  // of the mirror load depends on the type of 'n'. See LoadNode::Value().
  // LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bool has_load_barrier_nodes = bs->has_load_barrier_nodes();

  if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
    for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
      Node* u = use->fast_out(i2);
      const Type* ut = u->bottom_type();
      if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
        if (has_load_barrier_nodes) {
          // Search for load barriers behind the load
          for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
            Node* b = u->fast_out(i3);
            if (bs->is_gc_barrier_node(b)) {
              worklist.push(b);
            }
          }
        }
        worklist.push(u);
      }
    }
  }
  if (use->Opcode() == Op_OpaqueZeroTripGuard) {
    assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
    if (use->outcnt() == 1) {
      Node* cmp = use->unique_out();
      worklist.push(cmp);
    }
  }

  // From CastX2PNode::Ideal
  // CastX2P(AddX(x, y))
  // CastX2P(SubX(x, y))
  if (use->Opcode() == Op_AddX || use->Opcode() == Op_SubX) {
    for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
      Node* u = use->fast_out(i2);
      if (u->Opcode() == Op_CastX2P) {
        worklist.push(u);

// ...
      push_if_not_bottom_type(worklist, bol);
    }
  }
}

// If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
// See PhiNode::Value().
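// The pattern in question is the loop exit test, e.g. CmpI(iv_phi, limit)
// feeding the Bool of a CountedLoopEnd, where 'parent' is one of the Cmp's
// inputs.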
void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
  uint use_op = use->Opcode();
  if (use_op == Op_CmpI || use_op == Op_CmpL) {
    PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
    if (phi != nullptr) {
      worklist.push(phi);
    }
  }
}

// Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
// See LoadNode::Value().
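// LoadP(LoadP(AddP(foo:Klass, #java_mirror)))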
void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
  BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
  bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();

  if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
      Node* loadp = use->fast_out(i);
      const Type* ut = loadp->bottom_type();
      if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
        if (has_load_barrier_nodes) {
          // Search for load barriers behind the load
          push_load_barrier(worklist, barrier_set, loadp);
        }
        worklist.push(loadp);
      }
    }
  }
}

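// Enqueue every user of 'use' that the barrier set identifies as a GC barrier
// node, so its type is recomputed after 'use' changed.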
void PhaseCCP::push_load_barrier(Unique_Node_List& worklist, const BarrierSetC2* barrier_set, const Node* use) {
  for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
    Node* barrier_node = use->fast_out(i);
    if (barrier_set->is_gc_barrier_node(barrier_node)) {
      worklist.push(barrier_node);
    }
  }
}

// AndI/L::Value() optimizes patterns similar to (v << 2) & 3, or CON & 3, to
// zero if the two operands are bitwise disjoint. Add the AndI/L nodes back to
// the worklist to re-apply Value() in case the input is now a constant or the
// shift value has changed.
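// For example, in (v << 2) & 3 the shift guarantees the two low bits are zero,
// so the AND folds to the constant 0.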
void PhaseCCP::push_and(Unique_Node_List& worklist, const Node* parent, const Node* use) const {
  const TypeInteger* parent_type = type(parent)->isa_integer(type(parent)->basic_type());

// ...
      break;
    case 1:
      if( old->is_Store() || old->has_special_unique_user() )
        igvn->add_users_to_worklist( old );
      break;
    case 2:
      if( old->is_Store() )
        igvn->add_users_to_worklist( old );
      if( old->Opcode() == Op_Region )
        igvn->_worklist.push(old);
      break;
    case 3:
      if( old->Opcode() == Op_Region ) {
        igvn->_worklist.push(old);
        igvn->add_users_to_worklist( old );
      }
      break;
    default:
      break;
    }

    BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(igvn, old);
  }
}

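// Convenience overload: when 'gvn' is an iterative GVN phase, delegate to the
// PhaseIterGVN-aware set_req_X() above; otherwise just set the edge directly.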
void Node::set_req_X(uint i, Node *n, PhaseGVN *gvn) {
  PhaseIterGVN* igvn = gvn->is_IterGVN();
  if (igvn == nullptr) {
    set_req(i, n);
    return;
  }
  set_req_X(i, n, igvn);
}

//-------------------------------replace_by-----------------------------------
// Using def-use info, replace one node for another. Follow the def-use info
// to all users of the OLD node. Then make all uses point to the NEW node.
void Node::replace_by(Node *new_node) {
  assert(!is_top(), "top node has no DU info");
  for (DUIterator_Last imin, i = last_outs(imin); i >= imin; ) {
    Node* use = last_out(i);
    uint uses_found = 0;