  return longcon(l);
}


//------------------------------zerocon-----------------------------------------
// Fast zero or null constant. Same as "transform(ConNode::make(Type::get_zero_type(bt)))"
ConNode* PhaseValues::zerocon(BasicType bt) {
  assert((uint)bt <= _zcon_max, "domain check");
  ConNode* zcon = _zcons[bt];
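  // A cached zero constant can go dead between phases; a non-null control input
  // (constants are typically attached to the root) indicates the cached node is
  // still usable. Otherwise fall through and rebuild the cache entry below.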
  if (zcon != nullptr && zcon->in(TypeFunc::Control) != nullptr)
    return zcon;
  zcon = (ConNode*) uncached_makecon(Type::get_zero_type(bt));
  _zcons[bt] = zcon;
  return zcon;
}



//=============================================================================
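// Give the GC barrier set's C2 support the first chance to idealize the node;
// fall back to the node's own Ideal() when it declines (returns nullptr).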
Node* PhaseGVN::apply_ideal(Node* k, bool can_reshape) {
  Node* i = BarrierSet::barrier_set()->barrier_set_c2()->ideal_node(this, k, can_reshape);
  if (i == nullptr) {
    i = k->Ideal(this, can_reshape);
  }
  return i;
}

//------------------------------transform--------------------------------------
// Return a node which computes the same function as this node, but
// in a faster or cheaper fashion.
Node* PhaseGVN::transform(Node* n) {
  NOT_PRODUCT( set_transforms(); )

  // Apply the Ideal call in a loop until it no longer applies
  Node* k = n;
  Node* i = apply_ideal(k, /*can_reshape=*/false);
  NOT_PRODUCT(uint loop_count = 1;)
  while (i != nullptr) {
    assert(i->_idx >= k->_idx, "Idealize should return new nodes, use Identity to return old nodes");
    k = i;
#ifdef ASSERT
    if (loop_count >= K + C->live_nodes()) {
      dump_infinite_loop_info(i, "PhaseGVN::transform");
    }
#endif

// ...

            stack.push(in, PROCESS_INPUTS); // Recursively remove
            recurse = true;
          } else if (in->outcnt() == 1 &&
                     in->has_special_unique_user()) {
            _worklist.push(in->unique_out());
          } else if (in->outcnt() <= 2 && dead->is_Phi()) {
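            // The dying node is a Phi and 'in' has at most two remaining users:
            // a Region input may now be collapsible, and a Store input's
            // surviving user(s) may now simplify, so enqueue them for another look.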
            if (in->Opcode() == Op_Region) {
              _worklist.push(in);
            } else if (in->is_Store()) {
              DUIterator_Fast imax, i = in->fast_outs(imax);
              _worklist.push(in->fast_out(i));
              i++;
              if (in->outcnt() == 2) {
                _worklist.push(in->fast_out(i));
                i++;
              }
              assert(!(i < imax), "sanity");
            }
          } else if (dead->is_data_proj_of_pure_function(in)) {
            _worklist.push(in);
          } else {
            BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(this, in);
          }
          if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
              in->is_Proj() && in->in(0) != nullptr && in->in(0)->is_Initialize()) {
            // A Load that directly follows an InitializeNode is
            // going away. The Stores that follow are candidates
            // again to be captured by the InitializeNode.
            for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) {
              Node* n = in->fast_out(j);
              if (n->is_Store()) {
                _worklist.push(n);
              }
            }
          }
        } // if (in != nullptr && in != C->top())
      } // for (uint i = 0; i < dead->req(); i++)
      if (recurse) {
        continue;
      }
    } // if (!dead->is_Con())
  } // if (progress_state == PROCESS_INPUTS)

// ...

    if (init != nullptr) {
      init->for_each_proj(enqueue_init_mem_projs, TypeFunc::Memory);
    }
  }
  // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
  // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for IGVN
  // to guarantee the change is not missed.
  if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
    Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
    if (p != nullptr) {
      add_users_to_worklist0(p, worklist);
    }
  }

  if (use_op == Op_Initialize) {
    InitializeNode* init = use->as_Initialize();
    init->for_each_proj(enqueue_init_mem_projs, TypeFunc::Memory);
  }
  // Loading the java mirror from a Klass requires two loads and the type
  // of the mirror load depends on the type of 'n'. See LoadNode::Value().
  // LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bool has_load_barrier_nodes = bs->has_load_barrier_nodes();

  if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
    for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
      Node* u = use->fast_out(i2);
      const Type* ut = u->bottom_type();
      if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
        if (has_load_barrier_nodes) {
          // Search for load barriers behind the load
          for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
            Node* b = u->fast_out(i3);
            if (bs->is_gc_barrier_node(b)) {
              worklist.push(b);
            }
          }
        }
        worklist.push(u);
      }
    }
  }
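  // If the input of an OpaqueZeroTripGuard changed, the guarded CmpI may now fold;
  // enqueue its unique Cmp user so Value()/Ideal() get another chance at it.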
  if (use->Opcode() == Op_OpaqueZeroTripGuard) {
    assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
    if (use->outcnt() == 1) {
      Node* cmp = use->unique_out();
      worklist.push(cmp);
    }
  }

  // From CastX2PNode::Ideal
  // CastX2P(AddX(x, y))
  // CastX2P(SubX(x, y))
  if (use->Opcode() == Op_AddX || use->Opcode() == Op_SubX) {
    for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
      Node* u = use->fast_out(i2);
      if (u->Opcode() == Op_CastX2P) {
        worklist.push(u);

// ...

      push_if_not_bottom_type(worklist, bol);
    }
  }
}

// If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
// See PhiNode::Value().
void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
  uint use_op = use->Opcode();
  if (use_op == Op_CmpI || use_op == Op_CmpL) {
    PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
    if (phi != nullptr) {
      worklist.push(phi);
    }
  }
}
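
// Sketch of the shape countedloop_phi_from_cmp() matches (assumed, see PhiNode::Value()):
//   Phi (induction variable) -> CmpI/CmpL (loop exit test) -> Bool -> CountedLoopEnd
// When 'parent' feeds such a Cmp, the counted loop Phi's type can change along with it.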

// Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
// See LoadNode::Value().
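//   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))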
void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
  BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
  bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();

  if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
      Node* loadp = use->fast_out(i);
      const Type* ut = loadp->bottom_type();
      if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
        if (has_load_barrier_nodes) {
          // Search for load barriers behind the load
          push_load_barrier(worklist, barrier_set, loadp);
        }
        worklist.push(loadp);
      }
    }
  }
}

void PhaseCCP::push_load_barrier(Unique_Node_List& worklist, const BarrierSetC2* barrier_set, const Node* use) {
  for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
    Node* barrier_node = use->fast_out(i);
    if (barrier_set->is_gc_barrier_node(barrier_node)) {
      worklist.push(barrier_node);
    }
  }
}

// AndI/L::Value() folds patterns such as (v << 2) & 3, or CON & 3, to zero when the two
// operands have disjoint bits. Add the AndI/L nodes back to the worklist to re-apply Value()
// in case the parent has become a constant or the shift amount has changed.
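// For example, (v << 2) has its two low bits cleared while 3 has only those two
// bits set, so (v << 2) & 3 is always 0, whatever v is.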
void PhaseCCP::push_and(Unique_Node_List& worklist, const Node* parent, const Node* use) const {
  const TypeInteger* parent_type = type(parent)->isa_integer(type(parent)->basic_type());
  uint use_op = use->Opcode();
  if (
      // Pattern: parent (now constant) -> (ConstraintCast | ConvI2L)* -> And
      (parent_type != nullptr && parent_type->is_con()) ||
      // Pattern: parent -> LShift (use) -> (ConstraintCast | ConvI2L)* -> And
      ((use_op == Op_LShiftI || use_op == Op_LShiftL) && use->in(2) == parent)) {

    auto push_and_uses_to_worklist = [&](Node* n) {
      uint opc = n->Opcode();
      if (opc == Op_AndI || opc == Op_AndL) {
        push_if_not_bottom_type(worklist, n);
      }
    };
    auto is_boundary = [](Node* n) {
      return !(n->is_ConstraintCast() || n->Opcode() == Op_ConvI2L);

// ...

      break;
    case 1:
      if (old->is_Store() || old->has_special_unique_user())
        igvn->add_users_to_worklist(old);
      break;
    case 2:
      if (old->is_Store())
        igvn->add_users_to_worklist(old);
      if (old->Opcode() == Op_Region)
        igvn->_worklist.push(old);
      break;
    case 3:
      if (old->Opcode() == Op_Region) {
        igvn->_worklist.push(old);
        igvn->add_users_to_worklist(old);
      }
      break;
    default:
      break;
    }

    BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(igvn, old);
  }
}

void Node::set_req_X(uint i, Node* n, PhaseGVN* gvn) {
  PhaseIterGVN* igvn = gvn->is_IterGVN();
  if (igvn == nullptr) {
    set_req(i, n);
    return;
  }
  set_req_X(i, n, igvn);
}
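
// Usage sketch (hypothetical caller): rewiring an input through set_req_X keeps the
// IGVN worklist bookkeeping intact, and degrades to a plain set_req under non-iterative GVN:
//   use->set_req_X(MemNode::Memory, new_mem, phase);  // 'use', 'new_mem', 'phase' are illustrative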

//-------------------------------replace_by-----------------------------------
// Using def-use info, replace one node with another. Follow the def-use info
// to all users of the OLD node. Then make all uses point to the NEW node.
void Node::replace_by(Node* new_node) {
  assert(!is_top(), "top node has no DU info");
  for (DUIterator_Last imin, i = last_outs(imin); i >= imin; ) {
    Node* use = last_out(i);
    uint uses_found = 0;