
src/hotspot/share/opto/loopnode.cpp

  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciMethodData.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "libadt/vectset.hpp"
  31 #include "memory/allocation.inline.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/connode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/divnode.hpp"
  38 #include "opto/idealGraphPrinter.hpp"
  39 #include "opto/loopnode.hpp"
  40 #include "opto/mulnode.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "opto/superword.hpp"
  43 
  44 //=============================================================================
  45 //------------------------------is_loop_iv-------------------------------------
  46 // Determine if a node is a Counted loop induction variable.
  47 // The method is declared in node.hpp.
  48 const Node* Node::is_loop_iv() const {
  49   if (this->is_Phi() && !this->as_Phi()->is_copy() &&
  50       this->as_Phi()->region()->is_CountedLoop() &&
  51       this->as_Phi()->region()->as_CountedLoop()->phi() == this) {
  52     return this;
  53   } else {
  54     return NULL;
  55   }
  56 }
  57 
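A minimal usage sketch (hypothetical caller, not part of loopnode.cpp): is_loop_iv() returns the node itself when it is the induction-variable Phi of a CountedLoop, and NULL otherwise, so it can be used directly as a test:

     // Hypothetical check on some Node* n from the Ideal graph.
     if (n->is_loop_iv() != NULL) {
       // n is the iv Phi; its region is the CountedLoop head.
       CountedLoopNode* cl = n->as_Phi()->region()->as_CountedLoop();
       assert(cl->phi() == n, "is_loop_iv() implies this");
     }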
  58 //=============================================================================
  59 //------------------------------dump_spec--------------------------------------
  60 // Dump special per-node info
  61 #ifndef PRODUCT
  62 void LoopNode::dump_spec(outputStream *st) const {


2699           n2->set_req(0, c2);
2700           _igvn.hash_insert(n2);
2701           _igvn._worklist.push(n2);
2702           progress = true;
2703         }
2704       }
2705     }
2706   }
2707 
2708   return progress;
2709 }
2710 
2711 
2712 //=============================================================================
2713 //----------------------------build_and_optimize-------------------------------
2714 // Create a PhaseLoop.  Build the ideal Loop tree.  Map each Ideal Node to
2715 // its corresponding LoopNode.  If 'optimize' is true, do some loop cleanups.
2716 void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) {
2717   bool do_split_ifs = (mode == LoopOptsDefault || mode == LoopOptsLastRound);
2718   bool skip_loop_opts = (mode == LoopOptsNone);
2719 
2720   ResourceMark rm;
2721 
2722   int old_progress = C->major_progress();
2723   uint orig_worklist_size = _igvn._worklist.size();
2724 
2725   // Reset major-progress flag for the driver's heuristics
2726   C->clear_major_progress();
2727 
2728 #ifndef PRODUCT
2729   // Capture for later assert
2730   uint unique = C->unique();
2731   _loop_invokes++;
2732   _loop_work += unique;
2733 #endif
2734 
2735   // True if the method has at least 1 irreducible loop
2736   _has_irreducible_loops = false;
2737 
2738   _created_loop_node = false;


2763   build_loop_tree();
2764   // Check for bailout, and return
2765   if (C->failing()) {
2766     return;
2767   }
2768 
2769   // No loops after all
2770   if( !_ltree_root->_child && !_verify_only ) C->set_has_loops(false);
2771 
2772   // There should always be an outer loop containing the Root and Return nodes.
2773   // If not, we have a degenerate empty program.  Bail out in this case.
2774   if (!has_node(C->root())) {
2775     if (!_verify_only) {
2776       C->clear_major_progress();
2777       C->record_method_not_compilable("empty program detected during loop optimization");
2778     }
2779     return;
2780   }
2781 
2782   // Nothing to do, so get out
2783   bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !_verify_me && !_verify_only;
2784   bool do_expensive_nodes = C->should_optimize_expensive_nodes(_igvn);
2785   if (stop_early && !do_expensive_nodes) {
2786     _igvn.optimize();           // Cleanup NeverBranches
2787     return;
2788   }
2789 
2790   // Set loop nesting depth
2791   _ltree_root->set_nest( 0 );
2792 
2793   // Split shared headers and insert loop landing pads.
2794   // Do not bother doing this on the Root loop of course.
2795   if( !_verify_me && !_verify_only && _ltree_root->_child ) {
2796     C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3);
2797     if( _ltree_root->_child->beautify_loops( this ) ) {
2798       // Re-build loop tree!
2799       _ltree_root->_child = NULL;
2800       _nodes.clear();
2801       reallocate_preorders();
2802       build_loop_tree();
2803       // Check for bailout, and return


2842 
2843   // Walk the DATA nodes and place into loops.  Find earliest control
2844   // node.  For CFG nodes, the _nodes array starts out and remains
2845   // holding the associated IdealLoopTree pointer.  For DATA nodes, the
2846   // _nodes array holds the earliest legal controlling CFG node.
2847 
2848   // Allocate stack with enough space to avoid frequent realloc
2849   int stack_size = (C->live_nodes() >> 1) + 16; // (live_nodes>>1)+16 from Java2D stats
2850   Node_Stack nstack( a, stack_size );
2851 
2852   visited.Clear();
2853   Node_List worklist(a);
2854   // Don't need C->root() on worklist since
2855   // it will be processed among C->top() inputs
2856   worklist.push( C->top() );
2857   visited.set( C->top()->_idx ); // Set C->top() as visited now
2858   build_loop_early( visited, worklist, nstack );
2859 
2860   // Given early legal placement, try finding counted loops.  This placement
2861   // is good enough to discover most loop invariants.
2862   if( !_verify_me && !_verify_only )
2863     _ltree_root->counted_loop( this );
2864 
2865   // Find latest loop placement.  Find ideal loop placement.
2866   visited.Clear();
2867   init_dom_lca_tags();
2868   // Need C->root() on worklist when processing outs
2869   worklist.push( C->root() );
2870   NOT_PRODUCT( C->verify_graph_edges(); )
2871   worklist.push( C->top() );
2872   build_loop_late( visited, worklist, nstack );
2873 
2874   if (_verify_only) {
2875     // restore major progress flag
2876     for (int i = 0; i < old_progress; i++)
2877       C->set_major_progress();
2878     assert(C->unique() == unique, "verification mode made Nodes? ? ?");
2879     assert(_igvn._worklist.size() == orig_worklist_size, "shouldn't push anything");
2880     return;
2881   }
2882 


2914   if(TraceLoopOpts && C->has_loops()) {
2915     _ltree_root->dump();
2916   }
2917 #endif
2918 
2919   if (skip_loop_opts) {
2920     // restore major progress flag
2921     for (int i = 0; i < old_progress; i++) {
2922       C->set_major_progress();
2923     }
2924 
2925     // Cleanup any modified bits
2926     _igvn.optimize();
2927 
2928     if (C->log() != NULL) {
2929       log_loop_tree(_ltree_root, _ltree_root, C->log());
2930     }
2931     return;
2932   }
2933 
2934   if (ReassociateInvariants) {
2935     // Reassociate invariants and prep for split_thru_phi
2936     for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
2937       IdealLoopTree* lpt = iter.current();
2938       bool is_counted = lpt->is_counted();
2939       if (!is_counted || !lpt->is_inner()) continue;
2940 
2941       // check for vectorized loops; any reassociation of invariants was already done
2942       if (is_counted && lpt->_head->as_CountedLoop()->do_unroll_only()) continue;
2943 
2944       lpt->reassociate_invariants(this);
2945 
2946       // Because RCE opportunities can be masked by split_thru_phi,
2947       // look for RCE candidates and inhibit split_thru_phi
2948       // on just their loop-phi's for this pass of loop opts
2949       if (SplitIfBlocks && do_split_ifs) {
2950         if (lpt->policy_range_check(this)) {
2951           lpt->_rce_candidate = 1; // = true
2952         }
2953       }


3944     compute_lca_of_uses(n, early, true);
3945   }
3946 #endif
3947 
3948   // if this is a load, check for anti-dependent stores
3949   // We use a conservative algorithm to identify potential interfering
3950   // instructions and for rescheduling the load.  The users of the memory
3951   // input of this load are examined.  Any use which is not a load and is
3952   // dominated by early is considered a potentially interfering store.
3953   // This can produce false positives.
3954   if (n->is_Load() && LCA != early) {
3955     Node_List worklist;
3956 
3957     Node *mem = n->in(MemNode::Memory);
3958     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
3959       Node* s = mem->fast_out(i);
3960       worklist.push(s);
3961     }
3962     while(worklist.size() != 0 && LCA != early) {
3963       Node* s = worklist.pop();
3964       if (s->is_Load() || s->Opcode() == Op_SafePoint) {
3965         continue;
3966       } else if (s->is_MergeMem()) {
3967         for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) {
3968           Node* s1 = s->fast_out(i);
3969           worklist.push(s1);
3970         }
3971       } else {
3972         Node *sctrl = has_ctrl(s) ? get_ctrl(s) : s->in(0);
3973         assert(sctrl != NULL || s->outcnt() == 0, "must have control");
3974         if (sctrl != NULL && !sctrl->is_top() && is_dominator(early, sctrl)) {
3975           LCA = dom_lca_for_get_late_ctrl(LCA, sctrl, n);
3976         }
3977       }
3978     }
3979   }
3980 
3981   assert(LCA == find_non_split_ctrl(LCA), "unexpected late control");
3982   return LCA;
3983 }
3984 


4132         }
4133         // Get saved parent node and next use's index. Visit the rest of uses.
4134         n   = nstack.node();
4135         cnt = n->outcnt();
4136         i   = nstack.index();
4137         nstack.pop();
4138       }
4139     }
4140   }
4141 }
4142 
4143 // Verify that no data node is scheduled in the outer loop of a strip
4144 // mined loop.
4145 void PhaseIdealLoop::verify_strip_mined_scheduling(Node *n, Node* least) {
4146 #ifdef ASSERT
4147   if (get_loop(least)->_nest == 0) {
4148     return;
4149   }
4150   IdealLoopTree* loop = get_loop(least);
4151   Node* head = loop->_head;
4152   if (head->is_OuterStripMinedLoop()) {
4153     Node* sfpt = head->as_Loop()->outer_safepoint();
4154     ResourceMark rm;
4155     Unique_Node_List wq;
4156     wq.push(sfpt);
4157     for (uint i = 0; i < wq.size(); i++) {
4158       Node *m = wq.at(i);
4159       for (uint i = 1; i < m->req(); i++) {
4160         Node* nn = m->in(i);
4161         if (nn == n) {
4162           return;
4163         }
4164         if (nn != NULL && has_ctrl(nn) && get_loop(get_ctrl(nn)) == loop) {
4165           wq.push(nn);
4166         }
4167       }
4168     }
4169     ShouldNotReachHere();
4170   }
4171 #endif
4172 }


4212     case Op_LoadI:
4213     case Op_LoadKlass:
4214     case Op_LoadNKlass:
4215     case Op_LoadL:
4216     case Op_LoadS:
4217     case Op_LoadP:
4218     case Op_LoadBarrierSlowReg:
4219     case Op_LoadBarrierWeakSlowReg:
4220     case Op_LoadN:
4221     case Op_LoadRange:
4222     case Op_LoadD_unaligned:
4223     case Op_LoadL_unaligned:
4224     case Op_StrComp:            // Does a bunch of load-like effects
4225     case Op_StrEquals:
4226     case Op_StrIndexOf:
4227     case Op_StrIndexOfChar:
4228     case Op_AryEq:
4229     case Op_HasNegatives:
4230       pinned = false;
4231     }
4232     if( pinned ) {
4233       IdealLoopTree *chosen_loop = get_loop(n->is_CFG() ? n : get_ctrl(n));
4234       if( !chosen_loop->_child )       // Inner loop?
4235         chosen_loop->_body.push(n); // Collect inner loops
4236       return;
4237     }
4238   } else {                      // No slot zero
4239     if( n->is_CFG() ) {         // CFG with no slot 0 is dead
4240       _nodes.map(n->_idx,0);    // No block setting, it's globally dead
4241       return;
4242     }
4243     assert(!n->is_CFG() || n->outcnt() == 0, "");
4244   }
4245 
4246   // Do I have a "safe range" I can select over?
4247   Node *early = get_ctrl(n);// Early location already computed
4248 
4249   // Compute latest point this Node can go
4250   Node *LCA = get_late_ctrl( n, early );
4251   // LCA is NULL due to uses being dead


4476     }
4477     // Dump nodes it controls
4478     for( uint k = 0; k < _nodes.Size(); k++ ) {
4479       // (k < C->unique() && get_ctrl(find(k)) == n)
4480       if (k < C->unique() && _nodes[k] == (Node*)((intptr_t)n + 1)) {
4481         Node *m = C->root()->find(k);
4482         if( m && m->outcnt() > 0 ) {
4483           if (!(has_ctrl(m) && get_ctrl_no_update(m) == n)) {
4484             tty->print_cr("*** BROKEN CTRL ACCESSOR!  _nodes[k] is %p, ctrl is %p",
4485                           _nodes[k], has_ctrl(m) ? get_ctrl_no_update(m) : NULL);
4486           }
4487           for( uint j = 0; j < loop->_nest; j++ )
4488             tty->print("  ");
4489           tty->print(" ");
4490           m->dump();
4491         }
4492       }
4493     }
4494   }
4495 }
4496 
4497 // Collect a R-P-O for the whole CFG.
4498 // Result list is in post-order (scan backwards for RPO)
4499 void PhaseIdealLoop::rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const {
4500   stk.push(start, 0);
4501   visited.set(start->_idx);
4502 
4503   while (stk.is_nonempty()) {
4504     Node* m   = stk.node();
4505     uint  idx = stk.index();
4506     if (idx < m->outcnt()) {
4507       stk.set_index(idx + 1);
4508       Node* n = m->raw_out(idx);
4509       if (n->is_CFG() && !visited.test_set(n->_idx)) {
4510         stk.push(n, 0);
4511       }
4512     } else {
4513       rpo_list.push(m);
4514       stk.pop();
4515     }
4516   }
4517 }
4518 #endif
4519 
4520 
4521 //=============================================================================
4522 //------------------------------LoopTreeIterator-----------------------------------
4523 
4524 // Advance to next loop tree using a preorder, left-to-right traversal.
4525 void LoopTreeIterator::next() {
4526   assert(!done(), "must not be done.");
4527   if (_curnt->_child != NULL) {
4528     _curnt = _curnt->_child;
4529   } else if (_curnt->_next != NULL) {
4530     _curnt = _curnt->_next;
4531   } else {
4532     while (_curnt != _root && _curnt->_next == NULL) {
4533       _curnt = _curnt->_parent;
4534     }
4535     if (_curnt == _root) {
4536       _curnt = NULL;
4537       assert(done(), "must be done.");
4538     } else {

src/hotspot/share/opto/loopnode.cpp (new version; the listing above is the old version)

  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciMethodData.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "libadt/vectset.hpp"
  31 #include "memory/allocation.inline.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/connode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/divnode.hpp"
  38 #include "opto/idealGraphPrinter.hpp"
  39 #include "opto/loopnode.hpp"
  40 #include "opto/mulnode.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "opto/superword.hpp"
  43 #include "utilities/macros.hpp"
  44 #if INCLUDE_SHENANDOAHGC
  45 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  46 #endif
  47 
  48 //=============================================================================
  49 //------------------------------is_loop_iv-------------------------------------
  50 // Determine if a node is a Counted loop induction variable.
  51 // The method is declared in node.hpp.
  52 const Node* Node::is_loop_iv() const {
  53   if (this->is_Phi() && !this->as_Phi()->is_copy() &&
  54       this->as_Phi()->region()->is_CountedLoop() &&
  55       this->as_Phi()->region()->as_CountedLoop()->phi() == this) {
  56     return this;
  57   } else {
  58     return NULL;
  59   }
  60 }
  61 
  62 //=============================================================================
  63 //------------------------------dump_spec--------------------------------------
  64 // Dump special per-node info
  65 #ifndef PRODUCT
  66 void LoopNode::dump_spec(outputStream *st) const {


2703           n2->set_req(0, c2);
2704           _igvn.hash_insert(n2);
2705           _igvn._worklist.push(n2);
2706           progress = true;
2707         }
2708       }
2709     }
2710   }
2711 
2712   return progress;
2713 }
2714 
2715 
2716 //=============================================================================
2717 //----------------------------build_and_optimize-------------------------------
2718 // Create a PhaseLoop.  Build the ideal Loop tree.  Map each Ideal Node to
2719 // its corresponding LoopNode.  If 'optimize' is true, do some loop cleanups.
2720 void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) {
2721   bool do_split_ifs = (mode == LoopOptsDefault || mode == LoopOptsLastRound);
2722   bool skip_loop_opts = (mode == LoopOptsNone);
2723   bool shenandoah_opts = (mode == LoopOptsShenandoahExpand ||
2724                           mode == LoopOptsShenandoahPostExpand);
2725 
2726   ResourceMark rm;
2727 
2728   int old_progress = C->major_progress();
2729   uint orig_worklist_size = _igvn._worklist.size();
2730 
2731   // Reset major-progress flag for the driver's heuristics
2732   C->clear_major_progress();
2733 
2734 #ifndef PRODUCT
2735   // Capture for later assert
2736   uint unique = C->unique();
2737   _loop_invokes++;
2738   _loop_work += unique;
2739 #endif
2740 
2741   // True if the method has at least 1 irreducible loop
2742   _has_irreducible_loops = false;
2743 
2744   _created_loop_node = false;


2769   build_loop_tree();
2770   // Check for bailout, and return
2771   if (C->failing()) {
2772     return;
2773   }
2774 
2775   // No loops after all
2776   if( !_ltree_root->_child && !_verify_only ) C->set_has_loops(false);
2777 
2778   // There should always be an outer loop containing the Root and Return nodes.
2779   // If not, we have a degenerate empty program.  Bail out in this case.
2780   if (!has_node(C->root())) {
2781     if (!_verify_only) {
2782       C->clear_major_progress();
2783       C->record_method_not_compilable("empty program detected during loop optimization");
2784     }
2785     return;
2786   }
2787 
2788   // Nothing to do, so get out
2789   bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !_verify_me && !_verify_only && !shenandoah_opts;
2790   bool do_expensive_nodes = C->should_optimize_expensive_nodes(_igvn);
2791   if (stop_early && !do_expensive_nodes) {
2792     _igvn.optimize();           // Cleanup NeverBranches
2793     return;
2794   }
2795 
2796   // Set loop nesting depth
2797   _ltree_root->set_nest( 0 );
2798 
2799   // Split shared headers and insert loop landing pads.
2800   // Do not bother doing this on the Root loop of course.
2801   if( !_verify_me && !_verify_only && _ltree_root->_child ) {
2802     C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3);
2803     if( _ltree_root->_child->beautify_loops( this ) ) {
2804       // Re-build loop tree!
2805       _ltree_root->_child = NULL;
2806       _nodes.clear();
2807       reallocate_preorders();
2808       build_loop_tree();
2809       // Check for bailout, and return


2848 
2849   // Walk the DATA nodes and place into loops.  Find earliest control
2850   // node.  For CFG nodes, the _nodes array starts out and remains
2851   // holding the associated IdealLoopTree pointer.  For DATA nodes, the
2852   // _nodes array holds the earliest legal controlling CFG node.
2853 
2854   // Allocate stack with enough space to avoid frequent realloc
2855   int stack_size = (C->live_nodes() >> 1) + 16; // (live_nodes>>1)+16 from Java2D stats
2856   Node_Stack nstack( a, stack_size );
2857 
2858   visited.Clear();
2859   Node_List worklist(a);
2860   // Don't need C->root() on worklist since
2861   // it will be processed among C->top() inputs
2862   worklist.push( C->top() );
2863   visited.set( C->top()->_idx ); // Set C->top() as visited now
2864   build_loop_early( visited, worklist, nstack );
2865 
2866   // Given early legal placement, try finding counted loops.  This placement
2867   // is good enough to discover most loop invariants.
2868   if( !_verify_me && !_verify_only && !shenandoah_opts)
2869     _ltree_root->counted_loop( this );
2870 
2871   // Find latest loop placement.  Find ideal loop placement.
2872   visited.Clear();
2873   init_dom_lca_tags();
2874   // Need C->root() on worklist when processing outs
2875   worklist.push( C->root() );
2876   NOT_PRODUCT( C->verify_graph_edges(); )
2877   worklist.push( C->top() );
2878   build_loop_late( visited, worklist, nstack );
2879 
2880   if (_verify_only) {
2881     // restore major progress flag
2882     for (int i = 0; i < old_progress; i++)
2883       C->set_major_progress();
2884     assert(C->unique() == unique, "verification mode made Nodes? ? ?");
2885     assert(_igvn._worklist.size() == orig_worklist_size, "shouldn't push anything");
2886     return;
2887   }
2888 


2920   if(TraceLoopOpts && C->has_loops()) {
2921     _ltree_root->dump();
2922   }
2923 #endif
2924 
2925   if (skip_loop_opts) {
2926     // restore major progress flag
2927     for (int i = 0; i < old_progress; i++) {
2928       C->set_major_progress();
2929     }
2930 
2931     // Cleanup any modified bits
2932     _igvn.optimize();
2933 
2934     if (C->log() != NULL) {
2935       log_loop_tree(_ltree_root, _ltree_root, C->log());
2936     }
2937     return;
2938   }
2939 
2940 #if INCLUDE_SHENANDOAHGC
2941   if (UseShenandoahGC && ((ShenandoahBarrierSetC2*) BarrierSet::barrier_set()->barrier_set_c2())->optimize_loops(this, mode, visited, nstack, worklist)) {
2942     _igvn.optimize();
2943     if (C->log() != NULL) {
2944       log_loop_tree(_ltree_root, _ltree_root, C->log());
2945     }
2946     return;
2947   }
2948 #endif
2949 
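The block above hands loop optimization over to the Shenandoah barrier-set code when that GC is in use. From the call site alone, the hook presumably looks roughly like the following; this is a hedged reconstruction, not a declaration taken from the change:

     // Assumed shape of the hook on ShenandoahBarrierSetC2 (hypothetical):
     // returns true when it performed its own loop pass, in which case
     // build_and_optimize() only runs IGVN cleanup, logs the loop tree, and returns.
     bool optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode,
                         VectorSet& visited, Node_Stack& nstack,
                         Node_List& worklist);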
2950   if (ReassociateInvariants) {
2951     // Reassociate invariants and prep for split_thru_phi
2952     for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
2953       IdealLoopTree* lpt = iter.current();
2954       bool is_counted = lpt->is_counted();
2955       if (!is_counted || !lpt->is_inner()) continue;
2956 
2957       // check for vectorized loops; any reassociation of invariants was already done
2958       if (is_counted && lpt->_head->as_CountedLoop()->do_unroll_only()) continue;
2959 
2960       lpt->reassociate_invariants(this);
2961 
2962       // Because RCE opportunities can be masked by split_thru_phi,
2963       // look for RCE candidates and inhibit split_thru_phi
2964       // on just their loop-phi's for this pass of loop opts
2965       if (SplitIfBlocks && do_split_ifs) {
2966         if (lpt->policy_range_check(this)) {
2967           lpt->_rce_candidate = 1; // = true
2968         }
2969       }


3960     compute_lca_of_uses(n, early, true);
3961   }
3962 #endif
3963 
3964   // if this is a load, check for anti-dependent stores
3965   // We use a conservative algorithm to identify potential interfering
3966   // instructions and for rescheduling the load.  The users of the memory
3967   // input of this load are examined.  Any use which is not a load and is
3968   // dominated by early is considered a potentially interfering store.
3969   // This can produce false positives.
3970   if (n->is_Load() && LCA != early) {
3971     Node_List worklist;
3972 
3973     Node *mem = n->in(MemNode::Memory);
3974     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
3975       Node* s = mem->fast_out(i);
3976       worklist.push(s);
3977     }
3978     while(worklist.size() != 0 && LCA != early) {
3979       Node* s = worklist.pop();
3980       if (s->is_Load() || s->is_ShenandoahBarrier() || s->Opcode() == Op_SafePoint ||
3981           (UseShenandoahGC && s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0)) {
3982         continue;
3983       } else if (s->is_MergeMem()) {
3984         for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) {
3985           Node* s1 = s->fast_out(i);
3986           worklist.push(s1);
3987         }
3988       } else {
3989         Node *sctrl = has_ctrl(s) ? get_ctrl(s) : s->in(0);
3990         assert(sctrl != NULL || s->outcnt() == 0, "must have control");
3991         if (sctrl != NULL && !sctrl->is_top() && is_dominator(early, sctrl)) {
3992           LCA = dom_lca_for_get_late_ctrl(LCA, sctrl, n);
3993         }
3994       }
3995     }
3996   }
3997 
3998   assert(LCA == find_non_split_ctrl(LCA), "unexpected late control");
3999   return LCA;
4000 }
4001 
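Condensed, the worklist scan above treats every user of the load's memory state as a potentially interfering store unless it clearly cannot write that memory. The helper below is a hypothetical restatement of the skip-condition, not code from the patch:

     // Memory users that can never act as an interfering store.
     static bool never_interferes(Node* s) {
       return s->is_Load() ||                           // another load
              s->is_ShenandoahBarrier() ||              // Shenandoah barrier node
              s->Opcode() == Op_SafePoint ||            // safepoint poll
              (UseShenandoahGC && s->is_CallStaticJava() &&
               s->as_CallStaticJava()->uncommon_trap_request() != 0); // uncommon trap call
     }

MergeMem users are classified neither way; their own users are pushed onto the worklist and examined instead.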


4149         }
4150         // Get saved parent node and next use's index. Visit the rest of uses.
4151         n   = nstack.node();
4152         cnt = n->outcnt();
4153         i   = nstack.index();
4154         nstack.pop();
4155       }
4156     }
4157   }
4158 }
4159 
4160 // Verify that no data node is scheduled in the outer loop of a strip
4161 // mined loop.
4162 void PhaseIdealLoop::verify_strip_mined_scheduling(Node *n, Node* least) {
4163 #ifdef ASSERT
4164   if (get_loop(least)->_nest == 0) {
4165     return;
4166   }
4167   IdealLoopTree* loop = get_loop(least);
4168   Node* head = loop->_head;
4169   if (head->is_OuterStripMinedLoop() &&
4170       // Verification can't be applied to fully built strip mined loops
4171       head->as_Loop()->outer_loop_end()->in(1)->find_int_con(-1) == 0) {
4172     Node* sfpt = head->as_Loop()->outer_safepoint();
4173     ResourceMark rm;
4174     Unique_Node_List wq;
4175     wq.push(sfpt);
4176     for (uint i = 0; i < wq.size(); i++) {
4177       Node *m = wq.at(i);
4178       for (uint i = 1; i < m->req(); i++) {
4179         Node* nn = m->in(i);
4180         if (nn == n) {
4181           return;
4182         }
4183         if (nn != NULL && has_ctrl(nn) && get_loop(get_ctrl(nn)) == loop) {
4184           wq.push(nn);
4185         }
4186       }
4187     }
4188     ShouldNotReachHere();
4189   }
4190 #endif
4191 }


4231     case Op_LoadI:
4232     case Op_LoadKlass:
4233     case Op_LoadNKlass:
4234     case Op_LoadL:
4235     case Op_LoadS:
4236     case Op_LoadP:
4237     case Op_LoadBarrierSlowReg:
4238     case Op_LoadBarrierWeakSlowReg:
4239     case Op_LoadN:
4240     case Op_LoadRange:
4241     case Op_LoadD_unaligned:
4242     case Op_LoadL_unaligned:
4243     case Op_StrComp:            // Does a bunch of load-like effects
4244     case Op_StrEquals:
4245     case Op_StrIndexOf:
4246     case Op_StrIndexOfChar:
4247     case Op_AryEq:
4248     case Op_HasNegatives:
4249       pinned = false;
4250     }
4251     if (UseShenandoahGC && n->is_CMove()) {
4252       pinned = false;
4253     }
4254     if( pinned ) {
4255       IdealLoopTree *chosen_loop = get_loop(n->is_CFG() ? n : get_ctrl(n));
4256       if( !chosen_loop->_child )       // Inner loop?
4257         chosen_loop->_body.push(n); // Collect inner loops
4258       return;
4259     }
4260   } else {                      // No slot zero
4261     if( n->is_CFG() ) {         // CFG with no slot 0 is dead
4262       _nodes.map(n->_idx,0);    // No block setting, it's globally dead
4263       return;
4264     }
4265     assert(!n->is_CFG() || n->outcnt() == 0, "");
4266   }
4267 
4268   // Do I have a "safe range" I can select over?
4269   Node *early = get_ctrl(n);// Early location already computed
4270 
4271   // Compute latest point this Node can go
4272   Node *LCA = get_late_ctrl( n, early );
4273   // LCA is NULL due to uses being dead


4498     }
4499     // Dump nodes it controls
4500     for( uint k = 0; k < _nodes.Size(); k++ ) {
4501       // (k < C->unique() && get_ctrl(find(k)) == n)
4502       if (k < C->unique() && _nodes[k] == (Node*)((intptr_t)n + 1)) {
4503         Node *m = C->root()->find(k);
4504         if( m && m->outcnt() > 0 ) {
4505           if (!(has_ctrl(m) && get_ctrl_no_update(m) == n)) {
4506             tty->print_cr("*** BROKEN CTRL ACCESSOR!  _nodes[k] is %p, ctrl is %p",
4507                           _nodes[k], has_ctrl(m) ? get_ctrl_no_update(m) : NULL);
4508           }
4509           for( uint j = 0; j < loop->_nest; j++ )
4510             tty->print("  ");
4511           tty->print(" ");
4512           m->dump();
4513         }
4514       }
4515     }
4516   }
4517 }
4518 #endif
4519 
4520 // Collect a R-P-O for the whole CFG.
4521 // Result list is in post-order (scan backwards for RPO)
4522 void PhaseIdealLoop::rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const {
4523   stk.push(start, 0);
4524   visited.set(start->_idx);
4525 
4526   while (stk.is_nonempty()) {
4527     Node* m   = stk.node();
4528     uint  idx = stk.index();
4529     if (idx < m->outcnt()) {
4530       stk.set_index(idx + 1);
4531       Node* n = m->raw_out(idx);
4532       if (n->is_CFG() && !visited.test_set(n->_idx)) {
4533         stk.push(n, 0);
4534       }
4535     } else {
4536       rpo_list.push(m);
4537       stk.pop();
4538     }
4539   }
4540 }
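As the comment above says, rpo_list is produced in post-order, so a caller obtains a reverse post-order by walking the list from the back. A minimal usage sketch, assumed to run inside a PhaseIdealLoop member where the compile object C and an Arena a are in scope:

     // Collect the CFG post-order, then visit blocks in RPO by scanning backwards.
     Node_Stack stack(a, (C->live_nodes() >> 2) + 16);
     VectorSet  visited(a);
     Node_List  rpo_list(a);
     rpo(C->root(), stack, visited, rpo_list);
     for (int i = rpo_list.size() - 1; i >= 0; i--) {
       Node* cfg = rpo_list[i];   // CFG nodes, root first
       // ... process cfg ...
     }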

4541 
4542 
4543 //=============================================================================
4544 //------------------------------LoopTreeIterator-----------------------------------
4545 
4546 // Advance to next loop tree using a preorder, left-to-right traversal.
4547 void LoopTreeIterator::next() {
4548   assert(!done(), "must not be done.");
4549   if (_curnt->_child != NULL) {
4550     _curnt = _curnt->_child;
4551   } else if (_curnt->_next != NULL) {
4552     _curnt = _curnt->_next;
4553   } else {
4554     while (_curnt != _root && _curnt->_next == NULL) {
4555       _curnt = _curnt->_parent;
4556     }
4557     if (_curnt == _root) {
4558       _curnt = NULL;
4559       assert(done(), "must be done.");
4560     } else {