src/hotspot/share/opto/loopnode.hpp

  26 #define SHARE_OPTO_LOOPNODE_HPP
  27 
  28 #include "opto/cfgnode.hpp"
  29 #include "opto/multnode.hpp"
  30 #include "opto/phaseX.hpp"
  31 #include "opto/predicates.hpp"
  32 #include "opto/subnode.hpp"
  33 #include "opto/type.hpp"
  34 #include "utilities/checkedCast.hpp"
  35 
  36 class CmpNode;
  37 class BaseCountedLoopEndNode;
  38 class CountedLoopNode;
  39 class IdealLoopTree;
  40 class LoopNode;
  41 class Node;
  42 class OuterStripMinedLoopEndNode;
  43 class PredicateBlock;
  44 class PathFrequency;
  45 class PhaseIdealLoop;

  46 class UnswitchedLoopSelector;
  47 class VectorSet;
  48 class VSharedData;
  49 class Invariance;
  50 struct small_cache;
  51 
  52 //
  53 //                  I D E A L I Z E D   L O O P S
  54 //
  55 // Idealized loops are the set of loops I perform more interesting
  56 // transformations on, beyond simple hoisting.
  57 
  58 //------------------------------LoopNode---------------------------------------
  59 // Simple loop header.  Fall in path on left, loop-back path on right.
  60 class LoopNode : public RegionNode {
  61   // Size is bigger to hold the flags.  However, the flags do not change
  62   // the semantics so they do not appear in the hash & cmp functions.
  63   virtual uint size_of() const { return sizeof(*this); }
  64 protected:
  65   uint _loop_flags;
  66   // Names for flag bitfields
  67   enum { Normal=0, Pre=1, Main=2, Post=3, PreMainPostFlagsMask=3,
  68          MainHasNoPreLoop      = 1<<2,
  69          HasExactTripCount     = 1<<3,
  70          InnerLoop             = 1<<4,
  71          PartialPeelLoop       = 1<<5,
  72          PartialPeelFailed     = 1<<6,
  73          WasSlpAnalyzed        = 1<<7,
  74          PassedSlpAnalysis     = 1<<8,
  75          DoUnrollOnly          = 1<<9,
  76          VectorizedLoop        = 1<<10,
  77          HasAtomicPostLoop     = 1<<11,
  78          StripMined            = 1<<12,
  79          SubwordLoop           = 1<<13,
  80          ProfileTripFailed     = 1<<14,
  81          LoopNestInnerLoop     = 1<<15,
  82          LoopNestLongOuterLoop = 1<<16 };
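The two low-order bits of _loop_flags encode a counted loop's role in a pre/main/post nest (mask PreMainPostFlagsMask); the higher bits are independent markers. A minimal sketch of how the role is typically decoded (CountedLoopNode provides accessors along these lines):

    bool is_normal_loop() const { return (_loop_flags & PreMainPostFlagsMask) == Normal; }
    bool is_pre_loop()    const { return (_loop_flags & PreMainPostFlagsMask) == Pre;    }
    bool is_main_loop()   const { return (_loop_flags & PreMainPostFlagsMask) == Main;   }
    bool is_post_loop()   const { return (_loop_flags & PreMainPostFlagsMask) == Post;   }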

  83   char _unswitch_count;
  84   enum { _unswitch_max=3 };
  85 
  86   // Expected trip count from profile data
  87   float _profile_trip_cnt;
  88 
  89 public:
  90   // Names for edge indices
  91   enum { Self=0, EntryControl, LoopBackControl };
  92 
  93   bool is_inner_loop() const { return _loop_flags & InnerLoop; }
  94   void set_inner_loop() { _loop_flags |= InnerLoop; }
  95 
  96   bool is_vectorized_loop() const { return _loop_flags & VectorizedLoop; }
  97   bool is_partial_peel_loop() const { return _loop_flags & PartialPeelLoop; }
  98   void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; }
  99   bool partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; }
 100   bool is_strip_mined() const { return _loop_flags & StripMined; }
 101   bool is_profile_trip_failed() const { return _loop_flags & ProfileTripFailed; }
 102   bool is_subword_loop() const { return _loop_flags & SubwordLoop; }
 103   bool is_loop_nest_inner_loop() const { return _loop_flags & LoopNestInnerLoop; }
 104   bool is_loop_nest_outer_loop() const { return _loop_flags & LoopNestLongOuterLoop; }

 105 
 106   void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; }
 107   void mark_was_slp() { _loop_flags |= WasSlpAnalyzed; }
 108   void mark_passed_slp() { _loop_flags |= PassedSlpAnalysis; }
 109   void mark_do_unroll_only() { _loop_flags |= DoUnrollOnly; }
 110   void mark_loop_vectorized() { _loop_flags |= VectorizedLoop; }
 111   void mark_has_atomic_post_loop() { _loop_flags |= HasAtomicPostLoop; }
 112   void mark_strip_mined() { _loop_flags |= StripMined; }
 113   void clear_strip_mined() { _loop_flags &= ~StripMined; }
 114   void mark_profile_trip_failed() { _loop_flags |= ProfileTripFailed; }
 115   void mark_subword_loop() { _loop_flags |= SubwordLoop; }
 116   void mark_loop_nest_inner_loop() { _loop_flags |= LoopNestInnerLoop; }
 117   void mark_loop_nest_outer_loop() { _loop_flags |= LoopNestLongOuterLoop; }

 118 
 119   int unswitch_max() { return _unswitch_max; }
 120   int unswitch_count() { return _unswitch_count; }
 121 
 122   void set_unswitch_count(int val) {
 123     assert (val <= unswitch_max(), "too many unswitches");
 124     _unswitch_count = val;
 125   }
 126 
 127   void set_profile_trip_cnt(float ptc) { _profile_trip_cnt = ptc; }
 128   float profile_trip_cnt()             { return _profile_trip_cnt; }
 129 
 130   LoopNode(Node *entry, Node *backedge)
 131     : RegionNode(3), _loop_flags(0), _unswitch_count(0),
 132       _profile_trip_cnt(COUNT_UNKNOWN) {
 133     init_class_id(Class_Loop);
 134     init_req(EntryControl, entry);
 135     init_req(LoopBackControl, backedge);
 136   }
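A small usage sketch of the edge indices above; the helper names are illustrative, only Node::in() and the enum values come from this header:

    // Walk from a loop header to its two control inputs.
    static Node* entry_of(LoopNode* head)    { return head->in(LoopNode::EntryControl);    }
    static Node* backedge_of(LoopNode* head) { return head->in(LoopNode::LoopBackControl); }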
 137 

 673   // Convert to counted loops where possible
 674   void counted_loop( PhaseIdealLoop *phase );
 675 
 676   // Check for Node being a loop-breaking test
 677   Node *is_loop_exit(Node *iff) const;
 678 
 679   // Remove simplistic dead code from loop body
 680   void DCE_loop_body();
 681 
 682   // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
 683   // Replace with a 1-in-10 exit guess.
 684   void adjust_loop_exit_prob( PhaseIdealLoop *phase );
 685 
 686   // Return TRUE or FALSE if the loop should never be RCE'd or aligned.
 687   // Useful for unrolling loops with NO array accesses.
 688   bool policy_peel_only( PhaseIdealLoop *phase ) const;
 689 
 690   // Return TRUE or FALSE if the loop should be unswitched -- clone
 691   // loop with an invariant test
 692   bool policy_unswitching( PhaseIdealLoop *phase ) const;

 693 
 694   // Micro-benchmark spamming.  Remove empty loops.
 695   bool do_remove_empty_loop( PhaseIdealLoop *phase );
 696 
 697   // Convert one iteration loop into normal code.
 698   bool do_one_iteration_loop( PhaseIdealLoop *phase );
 699 
 700   // Return TRUE or FALSE if the loop should be peeled or not. Peel if we can
 701   // move some loop-invariant test (usually a null-check) before the loop.
 702   bool policy_peeling(PhaseIdealLoop *phase);
 703 
 704   uint estimate_peeling(PhaseIdealLoop *phase);
 705 
 706   // Return TRUE or FALSE if the loop should be maximally unrolled. Stash any
 707   // known trip count in the counted loop node.
 708   bool policy_maximally_unroll(PhaseIdealLoop *phase) const;
 709 
 710   // Return TRUE or FALSE if the loop should be unrolled or not. Apply unroll
 711   // if the loop is a counted loop and the loop body is small enough.
 712   bool policy_unroll(PhaseIdealLoop *phase);

1414 
1415  public:
1416   // Change the control input of expensive nodes to allow commoning by
1417   // IGVN when it is guaranteed to not result in a more frequent
1418   // execution of the expensive node. Return true if progress.
1419   bool process_expensive_nodes();
1420 
1421   // Check whether node has become unreachable
1422   bool is_node_unreachable(Node *n) const {
1423     return !has_node(n) || n->is_unreachable(_igvn);
1424   }
1425 
1426   // Eliminate range-checks and other trip-counter vs loop-invariant tests.
1427   void do_range_check(IdealLoopTree *loop, Node_List &old_new);
1428 
1429   // Clone loop with an invariant test (that does not exit) and
1430   // insert a clone of the test that selects which version to
1431   // execute.
1432   void do_unswitching(IdealLoopTree* loop, Node_List& old_new);
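For reference, the shape of the transformation do_unswitching performs, sketched at the source level (n, invariant_cond, fast_path and slow_path are placeholders):

    // Before: the loop-invariant test runs on every iteration.
    for (int i = 0; i < n; i++) {
      if (invariant_cond) { fast_path(i); } else { slow_path(i); }
    }
    // After: one copy of the test selects between two specialized loop bodies.
    if (invariant_cond) {
      for (int i = 0; i < n; i++) { fast_path(i); }
    } else {
      for (int i = 0; i < n; i++) { slow_path(i); }
    }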
1433 
1434   IfNode* find_unswitch_candidate(const IdealLoopTree* loop) const;

1435 
1436  private:
1437   static bool has_control_dependencies_from_predicates(LoopNode* head);
1438   static void revert_to_normal_loop(const LoopNode* loop_head);
1439 
1440   void hoist_invariant_check_casts(const IdealLoopTree* loop, const Node_List& old_new,
1441                                    const UnswitchedLoopSelector& unswitched_loop_selector);
1442   void add_unswitched_loop_version_bodies_to_igvn(IdealLoopTree* loop, const Node_List& old_new);
1443   static void increment_unswitch_counts(LoopNode* original_head, LoopNode* new_head);
1444   void remove_unswitch_candidate_from_loops(const Node_List& old_new, const UnswitchedLoopSelector& unswitched_loop_selector);
1445 #ifndef PRODUCT
1446   static void trace_loop_unswitching_count(IdealLoopTree* loop, LoopNode* original_head);
1447   static void trace_loop_unswitching_impossible(const LoopNode* original_head);
1448   static void trace_loop_unswitching_result(const UnswitchedLoopSelector& unswitched_loop_selector,

1449                                             const LoopNode* original_head, const LoopNode* new_head);
1450 #endif
1451 
1452  public:
1453 
1454   // Range Check Elimination uses this function!
1455   // Constrain the main loop iterations so the affine function:
1456   //    low_limit <= scale_con * I + offset  <  upper_limit
1457   // always holds true.  That is, either increase the number of iterations in
1458   // the pre-loop or the post-loop until the condition holds true in the main
1459   // loop.  Scale_con, offset and limit are all loop invariant.
1460   void add_constraint(jlong stride_con, jlong scale_con, Node* offset, Node* low_limit, Node* upper_limit, Node* pre_ctrl, Node** pre_limit, Node** main_limit);
1461   // Helper function for add_constraint().
1462   Node* adjust_limit(bool reduce, Node* scale, Node* offset, Node* rc_limit, Node* old_limit, Node* pre_ctrl, bool round);
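A worked instance of the constraint above, assuming the common case of a range check for a[i] in a stride-1 counted loop (the concrete values are illustrative):

    // for (int i = init; i < limit; i++) { ... a[i] ... }
    //   scale_con = 1, offset = 0, stride_con = +1
    //   low_limit = 0, upper_limit = a.length
    // Condition kept true in the main loop:  0 <= 1*i + 0 < a.length
    //   pre_limit  is raised so the main loop only starts once i >= 0
    //              (no extra pre-loop iterations if init >= 0 already)
    //   main_limit is lowered to MIN(limit, a.length) so i < a.length holds
    // Iterations outside [0, a.length) run in the pre- and post-loop, which
    // keep the original range check.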
1463 
1464   // Partially peel loop up through last_peel node.
1465   bool partial_peel( IdealLoopTree *loop, Node_List &old_new );
1466   bool duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old_new);
1467 
1468   // AutoVectorize the loop: replace scalar ops with vector ops.

1563   bool intrinsify_fill(IdealLoopTree* lpt);
1564   bool match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
1565                        Node*& shift, Node*& offset);
1566 
1567 private:
1568   // Return a type based on condition control flow
1569   const TypeInt* filtered_type( Node *n, Node* n_ctrl);
1570   const TypeInt* filtered_type( Node *n ) { return filtered_type(n, nullptr); }
 1571   // Helpers for filtered type
1572   const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl);
1573 
1574   // Helper functions
1575   Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache );
1576   Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true );
1577   void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true );
1578   bool split_up( Node *n, Node *blk1, Node *blk2 );
1579 
1580   Node* place_outside_loop(Node* useblock, IdealLoopTree* loop) const;
1581   Node* try_move_store_before_loop(Node* n, Node *n_ctrl);
1582   void try_move_store_after_loop(Node* n);

1583   bool identical_backtoback_ifs(Node *n);

1584   bool can_split_if(Node *n_ctrl);
1585   bool cannot_split_division(const Node* n, const Node* region) const;
1586   static bool is_divisor_loop_phi(const Node* divisor, const Node* loop);
1587   bool loop_phi_backedge_type_contains_zero(const Node* phi_divisor, const Type* zero) const;
1588 
1589   // Determine if a method is too big for a/another round of split-if, based on
1590   // a magic (approximate) ratio derived from the equally magic constant 35000,
1591   // previously used for this purpose (but without relating to the node limit).
1592   bool must_throttle_split_if() {
1593     uint threshold = C->max_node_limit() * 2 / 5;
1594     return C->live_nodes() > threshold;
1595   }
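As a worked example, assuming the default MaxNodeLimit of 80,000, the threshold comes out to 80,000 * 2 / 5 = 32,000 live nodes, in the same ballpark as the old hard-coded 35,000 mentioned above.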
1596 
1597   // A simplistic node request tracking mechanism, where
1598   //   = UINT_MAX   Request not valid or made final.
1599   //   < UINT_MAX   Nodes currently requested (estimate).
1600   uint _nodes_required;
1601 
1602   enum { REQUIRE_MIN = 70 };
1603 

1773                      uint new_counter, Node_List& old_new, Node_List& worklist, Node_List*& split_if_set,
1774                      Node_List*& split_bool_set, Node_List*& split_cex_set);
1775 
1776   void finish_clone_loop(Node_List* split_if_set, Node_List* split_bool_set, Node_List* split_cex_set);
1777 
1778   bool at_relevant_ctrl(Node* n, const Node* blk1, const Node* blk2);
1779 
1780   bool clone_cmp_loadklass_down(Node* n, const Node* blk1, const Node* blk2);
1781   void clone_loadklass_nodes_at_cmp_index(const Node* n, Node* cmp, int i);
1782   bool clone_cmp_down(Node* n, const Node* blk1, const Node* blk2);
1783   void clone_template_assertion_expression_down(Node* node);
1784 
1785   Node* similar_subtype_check(const Node* x, Node* r_in);
1786 
1787   void update_addp_chain_base(Node* x, Node* old_base, Node* new_base);
1788 
1789   bool can_move_to_inner_loop(Node* n, LoopNode* n_loop, Node* x);
1790 
1791   void pin_array_access_nodes_dependent_on(Node* ctrl);
1792 


1793   Node* ensure_node_and_inputs_are_above_pre_end(CountedLoopEndNode* pre_end, Node* node);
1794 };
1795 
1796 
1797 class AutoNodeBudget : public StackObj
1798 {
1799 public:
1800   enum budget_check_t { BUDGET_CHECK, NO_BUDGET_CHECK };
1801 
1802   AutoNodeBudget(PhaseIdealLoop* phase, budget_check_t chk = BUDGET_CHECK)
1803     : _phase(phase),
1804       _check_at_final(chk == BUDGET_CHECK),
1805       _nodes_at_begin(0)
1806   {
1807     precond(_phase != nullptr);
1808 
1809     _nodes_at_begin = _phase->require_nodes_begin();
1810   }
1811 
1812   ~AutoNodeBudget() {

  26 #define SHARE_OPTO_LOOPNODE_HPP
  27 
  28 #include "opto/cfgnode.hpp"
  29 #include "opto/multnode.hpp"
  30 #include "opto/phaseX.hpp"
  31 #include "opto/predicates.hpp"
  32 #include "opto/subnode.hpp"
  33 #include "opto/type.hpp"
  34 #include "utilities/checkedCast.hpp"
  35 
  36 class CmpNode;
  37 class BaseCountedLoopEndNode;
  38 class CountedLoopNode;
  39 class IdealLoopTree;
  40 class LoopNode;
  41 class Node;
  42 class OuterStripMinedLoopEndNode;
  43 class PredicateBlock;
  44 class PathFrequency;
  45 class PhaseIdealLoop;
  46 class UnswitchCandidate;
  47 class UnswitchedLoopSelector;
  48 class VectorSet;
  49 class VSharedData;
  50 class Invariance;
  51 struct small_cache;
  52 
  53 //
  54 //                  I D E A L I Z E D   L O O P S
  55 //
  56 // Idealized loops are the set of loops I perform more interesting
  57 // transformations on, beyond simple hoisting.
  58 
  59 //------------------------------LoopNode---------------------------------------
  60 // Simple loop header.  Fall in path on left, loop-back path on right.
  61 class LoopNode : public RegionNode {
  62   // Size is bigger to hold the flags.  However, the flags do not change
  63   // the semantics so they do not appear in the hash & cmp functions.
  64   virtual uint size_of() const { return sizeof(*this); }
  65 protected:
  66   uint _loop_flags;
  67   // Names for flag bitfields
  68   enum { Normal=0, Pre=1, Main=2, Post=3, PreMainPostFlagsMask=3,
  69          MainHasNoPreLoop      = 1<<2,
  70          HasExactTripCount     = 1<<3,
  71          InnerLoop             = 1<<4,
  72          PartialPeelLoop       = 1<<5,
  73          PartialPeelFailed     = 1<<6,
  74          WasSlpAnalyzed        = 1<<7,
  75          PassedSlpAnalysis     = 1<<8,
  76          DoUnrollOnly          = 1<<9,
  77          VectorizedLoop        = 1<<10,
  78          HasAtomicPostLoop     = 1<<11,
  79          StripMined            = 1<<12,
  80          SubwordLoop           = 1<<13,
  81          ProfileTripFailed     = 1<<14,
  82          LoopNestInnerLoop     = 1<<15,
  83          LoopNestLongOuterLoop = 1<<16,
  84          FlatArrays            = 1<<17};
  85   char _unswitch_count;
  86   enum { _unswitch_max=3 };
  87 
  88   // Expected trip count from profile data
  89   float _profile_trip_cnt;
  90 
  91 public:
  92   // Names for edge indices
  93   enum { Self=0, EntryControl, LoopBackControl };
  94 
  95   bool is_inner_loop() const { return _loop_flags & InnerLoop; }
  96   void set_inner_loop() { _loop_flags |= InnerLoop; }
  97 
  98   bool is_vectorized_loop() const { return _loop_flags & VectorizedLoop; }
  99   bool is_partial_peel_loop() const { return _loop_flags & PartialPeelLoop; }
 100   void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; }
 101   bool partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; }
 102   bool is_strip_mined() const { return _loop_flags & StripMined; }
 103   bool is_profile_trip_failed() const { return _loop_flags & ProfileTripFailed; }
 104   bool is_subword_loop() const { return _loop_flags & SubwordLoop; }
 105   bool is_loop_nest_inner_loop() const { return _loop_flags & LoopNestInnerLoop; }
 106   bool is_loop_nest_outer_loop() const { return _loop_flags & LoopNestLongOuterLoop; }
 107   bool is_flat_arrays() const { return _loop_flags & FlatArrays; }
 108 
 109   void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; }
 110   void mark_was_slp() { _loop_flags |= WasSlpAnalyzed; }
 111   void mark_passed_slp() { _loop_flags |= PassedSlpAnalysis; }
 112   void mark_do_unroll_only() { _loop_flags |= DoUnrollOnly; }
 113   void mark_loop_vectorized() { _loop_flags |= VectorizedLoop; }
 114   void mark_has_atomic_post_loop() { _loop_flags |= HasAtomicPostLoop; }
 115   void mark_strip_mined() { _loop_flags |= StripMined; }
 116   void clear_strip_mined() { _loop_flags &= ~StripMined; }
 117   void mark_profile_trip_failed() { _loop_flags |= ProfileTripFailed; }
 118   void mark_subword_loop() { _loop_flags |= SubwordLoop; }
 119   void mark_loop_nest_inner_loop() { _loop_flags |= LoopNestInnerLoop; }
 120   void mark_loop_nest_outer_loop() { _loop_flags |= LoopNestLongOuterLoop; }
 121   void mark_flat_arrays() { _loop_flags |= FlatArrays; }
 122 
 123   int unswitch_max() { return _unswitch_max; }
 124   int unswitch_count() { return _unswitch_count; }
 125 
 126   void set_unswitch_count(int val) {
 127     assert (val <= unswitch_max(), "too many unswitches");
 128     _unswitch_count = val;
 129   }
 130 
 131   void set_profile_trip_cnt(float ptc) { _profile_trip_cnt = ptc; }
 132   float profile_trip_cnt()             { return _profile_trip_cnt; }
 133 
 134   LoopNode(Node *entry, Node *backedge)
 135     : RegionNode(3), _loop_flags(0), _unswitch_count(0),
 136       _profile_trip_cnt(COUNT_UNKNOWN) {
 137     init_class_id(Class_Loop);
 138     init_req(EntryControl, entry);
 139     init_req(LoopBackControl, backedge);
 140   }
 141 

 677   // Convert to counted loops where possible
 678   void counted_loop( PhaseIdealLoop *phase );
 679 
 680   // Check for Node being a loop-breaking test
 681   Node *is_loop_exit(Node *iff) const;
 682 
 683   // Remove simplistic dead code from loop body
 684   void DCE_loop_body();
 685 
 686   // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
 687   // Replace with a 1-in-10 exit guess.
 688   void adjust_loop_exit_prob( PhaseIdealLoop *phase );
 689 
 690   // Return TRUE or FALSE if the loop should never be RCE'd or aligned.
 691   // Useful for unrolling loops with NO array accesses.
 692   bool policy_peel_only( PhaseIdealLoop *phase ) const;
 693 
 694   // Return TRUE or FALSE if the loop should be unswitched -- clone
 695   // loop with an invariant test
 696   bool policy_unswitching( PhaseIdealLoop *phase ) const;
 697   bool no_unswitch_candidate() const;
 698 
 699   // Micro-benchmark spamming.  Remove empty loops.
 700   bool do_remove_empty_loop( PhaseIdealLoop *phase );
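A sketch of the kind of loop this targets; the final value of the induction variable is what any later use still needs:

    // An empty counted loop such as
    //   for (int i = 0; i < n; i++) { }
    // does no work beyond advancing i; it is removed, and any later use of i
    // sees the directly computed final value (here max(n, 0)).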
 701 
 702   // Convert one iteration loop into normal code.
 703   bool do_one_iteration_loop( PhaseIdealLoop *phase );
 704 
 705   // Return TRUE or FALSE if the loop should be peeled or not. Peel if we can
 706   // move some loop-invariant test (usually a null-check) before the loop.
 707   bool policy_peeling(PhaseIdealLoop *phase);
 708 
 709   uint estimate_peeling(PhaseIdealLoop *phase);
 710 
 711   // Return TRUE or FALSE if the loop should be maximally unrolled. Stash any
 712   // known trip count in the counted loop node.
 713   bool policy_maximally_unroll(PhaseIdealLoop *phase) const;
 714 
 715   // Return TRUE or FALSE if the loop should be unrolled or not. Apply unroll
 716   // if the loop is a counted loop and the loop body is small enough.
 717   bool policy_unroll(PhaseIdealLoop *phase);
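Sketches of the two unrolling policies above; body(i) is a placeholder:

    // Maximal unroll: a counted loop with a small, known trip count
    //   for (int i = 0; i < 3; i++) { body(i); }
    // becomes straight-line code
    //   body(0); body(1); body(2);
    // Regular unroll (factor 2): the stride is widened and a post-loop
    // picks up the leftover iteration
    //   for (int i = 0; i < limit - 1; i += 2) { body(i); body(i + 1); }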

1419 
1420  public:
1421   // Change the control input of expensive nodes to allow commoning by
1422   // IGVN when it is guaranteed to not result in a more frequent
1423   // execution of the expensive node. Return true if progress.
1424   bool process_expensive_nodes();
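A conceptual sketch of the situation process_expensive_nodes improves; expensive_op stands for any node marked expensive:

    //   if (c) { x = expensive_op(a); }
    //   else   { y = expensive_op(a); }
    // Both nodes compute the same value but live under different controls, so
    // IGVN cannot common them. Moving their control up to the dominating test
    // does not make either execute more often (exactly one branch ran anyway),
    // and afterwards IGVN folds the two nodes into one.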
1425 
1426   // Check whether node has become unreachable
1427   bool is_node_unreachable(Node *n) const {
1428     return !has_node(n) || n->is_unreachable(_igvn);
1429   }
1430 
1431   // Eliminate range-checks and other trip-counter vs loop-invariant tests.
1432   void do_range_check(IdealLoopTree *loop, Node_List &old_new);
1433 
1434   // Clone loop with an invariant test (that does not exit) and
1435   // insert a clone of the test that selects which version to
1436   // execute.
1437   void do_unswitching(IdealLoopTree* loop, Node_List& old_new);
1438 
1439   IfNode* find_unswitch_candidates(const IdealLoopTree* loop, Node_List& flat_array_checks) const;
1440   IfNode* find_unswitch_candidate_from_idoms(const IdealLoopTree* loop) const;
1441 
1442  private:
1443   static bool has_control_dependencies_from_predicates(LoopNode* head);
1444   static void revert_to_normal_loop(const LoopNode* loop_head);
1445 
1446   void hoist_invariant_check_casts(const IdealLoopTree* loop, const Node_List& old_new,
1447                                    const UnswitchCandidate& unswitch_candidate, const IfNode* loop_selector);
1448   void add_unswitched_loop_version_bodies_to_igvn(IdealLoopTree* loop, const Node_List& old_new);
1449   static void increment_unswitch_counts(LoopNode* original_head, LoopNode* new_head);
1450   void remove_unswitch_candidate_from_loops(const Node_List& old_new, const UnswitchedLoopSelector& unswitched_loop_selector);
1451 #ifndef PRODUCT
1452   static void trace_loop_unswitching_count(IdealLoopTree* loop, LoopNode* original_head);
1453   static void trace_loop_unswitching_impossible(const LoopNode* original_head);
1454   static void trace_loop_unswitching_result(const UnswitchedLoopSelector& unswitched_loop_selector,
1455                                             const UnswitchCandidate& unswitch_candidate,
1456                                             const LoopNode* original_head, const LoopNode* new_head);
1457 #endif
1458 
1459  public:
1460 
1461   // Range Check Elimination uses this function!
1462   // Constrain the main loop iterations so the affine function:
1463   //    low_limit <= scale_con * I + offset  <  upper_limit
1464   // always holds true.  That is, either increase the number of iterations in
1465   // the pre-loop or the post-loop until the condition holds true in the main
1466   // loop.  Scale_con, offset and limit are all loop invariant.
1467   void add_constraint(jlong stride_con, jlong scale_con, Node* offset, Node* low_limit, Node* upper_limit, Node* pre_ctrl, Node** pre_limit, Node** main_limit);
1468   // Helper function for add_constraint().
1469   Node* adjust_limit(bool reduce, Node* scale, Node* offset, Node* rc_limit, Node* old_limit, Node* pre_ctrl, bool round);
1470 
1471   // Partially peel loop up through last_peel node.
1472   bool partial_peel( IdealLoopTree *loop, Node_List &old_new );
1473   bool duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old_new);
1474 
1475   // AutoVectorize the loop: replace scalar ops with vector ops.

1570   bool intrinsify_fill(IdealLoopTree* lpt);
1571   bool match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
1572                        Node*& shift, Node*& offset);
1573 
1574 private:
1575   // Return a type based on condition control flow
1576   const TypeInt* filtered_type( Node *n, Node* n_ctrl);
1577   const TypeInt* filtered_type( Node *n ) { return filtered_type(n, nullptr); }
 1578   // Helpers for filtered type
1579   const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl);
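A small example of the narrowing filtered_type performs, based on dominating tests (a and i are placeholders):

    // if (i >= 0) {
    //   if (i < a.length) {
    //     // here the filtered type of i is narrowed to [0, a.length - 1],
    //     // e.g. tight enough to prove a later range check on a[i] redundant
    //   }
    // }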
1580 
1581   // Helper functions
1582   Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache );
1583   Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true );
1584   void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true );
1585   bool split_up( Node *n, Node *blk1, Node *blk2 );
1586 
1587   Node* place_outside_loop(Node* useblock, IdealLoopTree* loop) const;
1588   Node* try_move_store_before_loop(Node* n, Node *n_ctrl);
1589   void try_move_store_after_loop(Node* n);
1590   void move_flat_array_check_out_of_loop(Node* n);
1591   bool identical_backtoback_ifs(Node *n);
1592   bool flat_array_element_type_check(Node *n);
1593   bool can_split_if(Node *n_ctrl);
1594   bool cannot_split_division(const Node* n, const Node* region) const;
1595   static bool is_divisor_loop_phi(const Node* divisor, const Node* loop);
1596   bool loop_phi_backedge_type_contains_zero(const Node* phi_divisor, const Type* zero) const;
1597 
1598   // Determine if a method is too big for a/another round of split-if, based on
1599   // a magic (approximate) ratio derived from the equally magic constant 35000,
1600   // previously used for this purpose (but without relating to the node limit).
1601   bool must_throttle_split_if() {
1602     uint threshold = C->max_node_limit() * 2 / 5;
1603     return C->live_nodes() > threshold;
1604   }
1605 
1606   // A simplistic node request tracking mechanism, where
1607   //   = UINT_MAX   Request not valid or made final.
1608   //   < UINT_MAX   Nodes currently requested (estimate).
1609   uint _nodes_required;
1610 
1611   enum { REQUIRE_MIN = 70 };
1612 

1782                      uint new_counter, Node_List& old_new, Node_List& worklist, Node_List*& split_if_set,
1783                      Node_List*& split_bool_set, Node_List*& split_cex_set);
1784 
1785   void finish_clone_loop(Node_List* split_if_set, Node_List* split_bool_set, Node_List* split_cex_set);
1786 
1787   bool at_relevant_ctrl(Node* n, const Node* blk1, const Node* blk2);
1788 
1789   bool clone_cmp_loadklass_down(Node* n, const Node* blk1, const Node* blk2);
1790   void clone_loadklass_nodes_at_cmp_index(const Node* n, Node* cmp, int i);
1791   bool clone_cmp_down(Node* n, const Node* blk1, const Node* blk2);
1792   void clone_template_assertion_expression_down(Node* node);
1793 
1794   Node* similar_subtype_check(const Node* x, Node* r_in);
1795 
1796   void update_addp_chain_base(Node* x, Node* old_base, Node* new_base);
1797 
1798   bool can_move_to_inner_loop(Node* n, LoopNode* n_loop, Node* x);
1799 
1800   void pin_array_access_nodes_dependent_on(Node* ctrl);
1801 
1802   void collect_flat_array_checks(const IdealLoopTree* loop, Node_List& flat_array_checks) const;
1803 
1804   Node* ensure_node_and_inputs_are_above_pre_end(CountedLoopEndNode* pre_end, Node* node);
1805 };
1806 
1807 
1808 class AutoNodeBudget : public StackObj
1809 {
1810 public:
1811   enum budget_check_t { BUDGET_CHECK, NO_BUDGET_CHECK };
1812 
1813   AutoNodeBudget(PhaseIdealLoop* phase, budget_check_t chk = BUDGET_CHECK)
1814     : _phase(phase),
1815       _check_at_final(chk == BUDGET_CHECK),
1816       _nodes_at_begin(0)
1817   {
1818     precond(_phase != nullptr);
1819 
1820     _nodes_at_begin = _phase->require_nodes_begin();
1821   }
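A hedged usage sketch of this RAII helper inside a PhaseIdealLoop transformation; the estimate and the may_require_nodes() call pattern are illustrative rather than prescriptive:

    {
      AutoNodeBudget node_budget(this);    // records the live node count on entry
      uint estimate = 0;                   // caller-computed expected node growth
      if (may_require_nodes(estimate)) {   // request nodes against the budget
        // ... perform the transformation that creates new nodes ...
      }
    }   // on scope exit the request is settled (and checked under BUDGET_CHECK)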
1822 
1823   ~AutoNodeBudget() {