< prev index next >

src/hotspot/share/opto/loopnode.cpp

Print this page




1576       }
1577     }
1578   }
1579 
1580   if (iv_phi != NULL) {
1581     // Now adjust the inner loop's exit condition
1582     Node* limit = inner_cl->limit();
1583     Node* sub = NULL;
1584     if (stride > 0) {
1585       sub = igvn->transform(new SubINode(limit, iv_phi));
1586     } else {
1587       sub = igvn->transform(new SubINode(iv_phi, limit));
1588     }
1589     Node* min = igvn->transform(new MinINode(sub, igvn->intcon(scaled_iters)));
1590     Node* new_limit = NULL;
1591     if (stride > 0) {
1592       new_limit = igvn->transform(new AddINode(min, iv_phi));
1593     } else {
1594       new_limit = igvn->transform(new SubINode(iv_phi, min));
1595     }
1596     Node* cmp = inner_cle->cmp_node()->clone();
1597     igvn->replace_input_of(cmp, 2, new_limit);
1598     Node* bol = inner_cle->in(CountedLoopEndNode::TestValue)->clone();
1599     cmp->set_req(2, limit);
1600     bol->set_req(1, igvn->transform(cmp));
1601     igvn->replace_input_of(outer_loop_end(), 1, igvn->transform(bol));





1602   } else {
1603     assert(false, "should be able to adjust outer loop");
1604     IfNode* outer_le = outer_loop_end();
1605     Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
1606     igvn->replace_node(outer_le, iff);
1607     inner_cl->clear_strip_mined();
1608   }
1609 }
1610 
1611 const Type* OuterStripMinedLoopEndNode::Value(PhaseGVN* phase) const {
1612   if (!in(0)) return Type::TOP;  // no control input: node is dead
1613   if (phase->type(in(0)) == Type::TOP)  // control input computes TOP: dead as well
1614     return Type::TOP;
1615 
1616   return TypeTuple::IFBOTH;  // otherwise both if-projections are treated as reachable
1617 }
1618 
1619 Node *OuterStripMinedLoopEndNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1620   if (remove_dead_region(phase, can_reshape))  return this;
1621 


2695           _igvn.hash_insert(n2);
2696           _igvn._worklist.push(n2);
2697           progress = true;
2698         }
2699       }
2700     }
2701   }
2702 
2703   return progress;
2704 }
2705 
2706 
2707 //=============================================================================
2708 //----------------------------build_and_optimize-------------------------------
2709 // Create a PhaseLoop.  Build the ideal Loop tree.  Map each Ideal Node to
2710 // its corresponding LoopNode.  If 'optimize' is true, do some loop cleanups.
2711 void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) {
2712   bool do_split_ifs = (mode == LoopOptsDefault || mode == LoopOptsLastRound);
2713   bool skip_loop_opts = (mode == LoopOptsNone);
2714 
2715   ResourceMark rm;
2716 
2717   int old_progress = C->major_progress();
2718   uint orig_worklist_size = _igvn._worklist.size();
2719 
2720   // Reset major-progress flag for the driver's heuristics
2721   C->clear_major_progress();
2722 
2723 #ifndef PRODUCT
2724   // Capture for later assert
2725   uint unique = C->unique();
2726   _loop_invokes++;
2727   _loop_work += unique;
2728 #endif
2729 
2730   // True if the method has at least 1 irreducible loop
2731   _has_irreducible_loops = false;
2732 
2733   _created_loop_node = false;
2734 
2735   Arena *a = Thread::current()->resource_area();
2736   VectorSet visited(a);


3951     compute_lca_of_uses(n, early, true);
3952   }
3953 #endif
3954 
3955   // if this is a load, check for anti-dependent stores
3956   // We use a conservative algorithm to identify potential interfering
3957   // instructions and for rescheduling the load.  The users of the memory
3958   // input of this load are examined.  Any use which is not a load and is
3959   // dominated by early is considered a potentially interfering store.
3960   // This can produce false positives.
3961   if (n->is_Load() && LCA != early) {
3962     Node_List worklist;
3963 
3964     Node *mem = n->in(MemNode::Memory);
3965     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
3966       Node* s = mem->fast_out(i);
3967       worklist.push(s);
3968     }
3969     while(worklist.size() != 0 && LCA != early) {
3970       Node* s = worklist.pop();
3971       if (s->is_Load() || s->is_ShenandoahBarrier() || s->Opcode() == Op_SafePoint ||
3972           (s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0)) {
3973         continue;
3974       } else if (s->is_MergeMem()) {
3975         for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) {
3976           Node* s1 = s->fast_out(i);
3977           worklist.push(s1);
3978         }
3979       } else {
3980         Node *sctrl = has_ctrl(s) ? get_ctrl(s) : s->in(0);
3981         assert(sctrl != NULL || s->outcnt() == 0, "must have control");
3982         if (sctrl != NULL && !sctrl->is_top() && is_dominator(early, sctrl)) {
3983           LCA = dom_lca_for_get_late_ctrl(LCA, sctrl, n);
3984         }
3985       }
3986     }
3987   }
3988 
3989   assert(LCA == find_non_split_ctrl(LCA), "unexpected late control");
3990   return LCA;
3991 }
3992 




1576       }
1577     }
1578   }
1579 
1580   if (iv_phi != NULL) {
1581     // Now adjust the inner loop's exit condition
1582     Node* limit = inner_cl->limit();
1583     Node* sub = NULL;
1584     if (stride > 0) {
1585       sub = igvn->transform(new SubINode(limit, iv_phi));
1586     } else {
1587       sub = igvn->transform(new SubINode(iv_phi, limit));
1588     }
1589     Node* min = igvn->transform(new MinINode(sub, igvn->intcon(scaled_iters)));
1590     Node* new_limit = NULL;
1591     if (stride > 0) {
1592       new_limit = igvn->transform(new AddINode(min, iv_phi));
1593     } else {
1594       new_limit = igvn->transform(new SubINode(iv_phi, min));
1595     }
1596     Node* inner_cmp = inner_cle->cmp_node();
1597     Node* inner_bol = inner_cle->in(CountedLoopEndNode::TestValue);
1598     Node* outer_bol = inner_bol;
1599     // cmp node for inner loop may be shared
1600     inner_cmp = inner_cmp->clone();
1601     inner_cmp->set_req(2, new_limit);
1602     inner_bol = inner_bol->clone();
1603     inner_bol->set_req(1, igvn->transform(inner_cmp));
1604     igvn->replace_input_of(inner_cle, CountedLoopEndNode::TestValue, igvn->transform(inner_bol));
1605     // Set the outer loop's exit condition too
1606     igvn->replace_input_of(outer_loop_end(), 1, outer_bol);
1607   } else {
1608     assert(false, "should be able to adjust outer loop");
1609     IfNode* outer_le = outer_loop_end();
1610     Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
1611     igvn->replace_node(outer_le, iff);
1612     inner_cl->clear_strip_mined();
1613   }
1614 }
1615 
1616 const Type* OuterStripMinedLoopEndNode::Value(PhaseGVN* phase) const {
1617   if (!in(0)) return Type::TOP;  // no control input: node is dead
1618   if (phase->type(in(0)) == Type::TOP)  // control input computes TOP: dead as well
1619     return Type::TOP;
1620 
1621   return TypeTuple::IFBOTH;  // otherwise both if-projections are treated as reachable
1622 }
1623 
1624 Node *OuterStripMinedLoopEndNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1625   if (remove_dead_region(phase, can_reshape))  return this;
1626 


2700           _igvn.hash_insert(n2);
2701           _igvn._worklist.push(n2);
2702           progress = true;
2703         }
2704       }
2705     }
2706   }
2707 
2708   return progress;
2709 }
2710 
2711 
2712 //=============================================================================
2713 //----------------------------build_and_optimize-------------------------------
2714 // Create a PhaseLoop.  Build the ideal Loop tree.  Map each Ideal Node to
2715 // its corresponding LoopNode.  If 'optimize' is true, do some loop cleanups.
2716 void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) {
2717   bool do_split_ifs = (mode == LoopOptsDefault || mode == LoopOptsLastRound);
2718   bool skip_loop_opts = (mode == LoopOptsNone);
2719 


2720   int old_progress = C->major_progress();
2721   uint orig_worklist_size = _igvn._worklist.size();
2722 
2723   // Reset major-progress flag for the driver's heuristics
2724   C->clear_major_progress();
2725 
2726 #ifndef PRODUCT
2727   // Capture for later assert
2728   uint unique = C->unique();
2729   _loop_invokes++;
2730   _loop_work += unique;
2731 #endif
2732 
2733   // True if the method has at least 1 irreducible loop
2734   _has_irreducible_loops = false;
2735 
2736   _created_loop_node = false;
2737 
2738   Arena *a = Thread::current()->resource_area();
2739   VectorSet visited(a);


3954     compute_lca_of_uses(n, early, true);
3955   }
3956 #endif
3957 
3958   // if this is a load, check for anti-dependent stores
3959   // We use a conservative algorithm to identify potential interfering
3960   // instructions and for rescheduling the load.  The users of the memory
3961   // input of this load are examined.  Any use which is not a load and is
3962   // dominated by early is considered a potentially interfering store.
3963   // This can produce false positives.
3964   if (n->is_Load() && LCA != early) {
3965     Node_List worklist;
3966 
3967     Node *mem = n->in(MemNode::Memory);
3968     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
3969       Node* s = mem->fast_out(i);
3970       worklist.push(s);
3971     }
3972     while(worklist.size() != 0 && LCA != early) {
3973       Node* s = worklist.pop();
3974       if (s->is_Load() || s->Opcode() == Op_SafePoint ||
3975           (s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0) || s->is_Phi()) {
3976         continue;
3977       } else if (s->is_MergeMem()) {
3978         for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) {
3979           Node* s1 = s->fast_out(i);
3980           worklist.push(s1);
3981         }
3982       } else {
3983         Node *sctrl = has_ctrl(s) ? get_ctrl(s) : s->in(0);
3984         assert(sctrl != NULL || s->outcnt() == 0, "must have control");
3985         if (sctrl != NULL && !sctrl->is_top() && is_dominator(early, sctrl)) {
3986           LCA = dom_lca_for_get_late_ctrl(LCA, sctrl, n);
3987         }
3988       }
3989     }
3990   }
3991 
3992   assert(LCA == find_non_split_ctrl(LCA), "unexpected late control");
3993   return LCA;
3994 }
3995 


< prev index next >