26 #define SHARE_OPTO_LOOPNODE_HPP
27
28 #include "opto/cfgnode.hpp"
29 #include "opto/multnode.hpp"
30 #include "opto/phaseX.hpp"
31 #include "opto/predicates.hpp"
32 #include "opto/subnode.hpp"
33 #include "opto/type.hpp"
34 #include "utilities/checkedCast.hpp"
35
36 class CmpNode;
37 class BaseCountedLoopEndNode;
38 class CountedLoopNode;
39 class IdealLoopTree;
40 class LoopNode;
41 class Node;
42 class OuterStripMinedLoopEndNode;
43 class PredicateBlock;
44 class PathFrequency;
45 class PhaseIdealLoop;
46 class LoopSelector;
47 class ReachabilityFenceNode;
48 class UnswitchedLoopSelector;
49 class VectorSet;
50 class VSharedData;
51 class Invariance;
52 struct small_cache;
53
54 //
55 // I D E A L I Z E D L O O P S
56 //
57 // Idealized loops are the set of loops I perform more interesting
58 // transformations on, beyond simple hoisting.
59
60 //------------------------------LoopNode---------------------------------------
61 // Simple loop header. Fall in path on left, loop-back path on right.
62 class LoopNode : public RegionNode {
63 // Size is bigger to hold the flags. However, the flags do not change
64 // the semantics so it does not appear in the hash & cmp functions.
65 virtual uint size_of() const { return sizeof(*this); }
69 enum { Normal=0, Pre=1, Main=2, Post=3, PreMainPostFlagsMask=3, // bits 0-1 encode the pre/main/post role of a counted loop
70 MainHasNoPreLoop = 1<<2,
71 HasExactTripCount = 1<<3,
72 InnerLoop = 1<<4,
73 PartialPeelLoop = 1<<5,
74 PartialPeelFailed = 1<<6, // sticky: only set (mark_partial_peel_failed), never cleared
75 WasSlpAnalyzed = 1<<7,
76 PassedSlpAnalysis = 1<<8,
77 DoUnrollOnly = 1<<9,
78 VectorizedLoop = 1<<10,
79 HasAtomicPostLoop = 1<<11,
80 StripMined = 1<<12, // only flag with a clear_ accessor; can be reverted (clear_strip_mined)
81 SubwordLoop = 1<<13,
82 ProfileTripFailed = 1<<14,
83 LoopNestInnerLoop = 1<<15,
84 LoopNestLongOuterLoop = 1<<16,
85 MultiversionFastLoop = 1<<17, // Multiversion* values share a 2-bit field at bits 17-18;
86 MultiversionSlowLoop = 2<<17, // they are field values, NOT independent one-hot flags --
87 MultiversionDelayedSlowLoop = 3<<17, // extract with MultiversionFlagsMask before comparing
88 MultiversionFlagsMask = 3<<17,
89 };
90 char _unswitch_count; // times this loop has been unswitched; capped by set_unswitch_count()
91 enum { _unswitch_max=3 };
92
93 // Expected trip count from profile data
94 float _profile_trip_cnt;
95
96 public:
97 // Names for edge indices
98 enum { Self=0, EntryControl, LoopBackControl }; // EntryControl=1 (fall-in), LoopBackControl=2 (backedge)
99
100 bool is_inner_loop() const { return _loop_flags & InnerLoop; } // flag accessors: each tests/sets one _loop_flags bit
101 void set_inner_loop() { _loop_flags |= InnerLoop; }
102
103 bool is_vectorized_loop() const { return _loop_flags & VectorizedLoop; }
104 bool is_partial_peel_loop() const { return _loop_flags & PartialPeelLoop; }
105 void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; }
106 bool partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; }
107 bool is_strip_mined() const { return _loop_flags & StripMined; }
108 bool is_profile_trip_failed() const { return _loop_flags & ProfileTripFailed; }
109 bool is_subword_loop() const { return _loop_flags & SubwordLoop; }
110 bool is_loop_nest_inner_loop() const { return _loop_flags & LoopNestInnerLoop; }
111 bool is_loop_nest_outer_loop() const { return _loop_flags & LoopNestLongOuterLoop; }
112
113 void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; }
114 void mark_was_slp() { _loop_flags |= WasSlpAnalyzed; }
115 void mark_passed_slp() { _loop_flags |= PassedSlpAnalysis; }
116 void mark_do_unroll_only() { _loop_flags |= DoUnrollOnly; }
117 void mark_loop_vectorized() { _loop_flags |= VectorizedLoop; }
118 void mark_has_atomic_post_loop() { _loop_flags |= HasAtomicPostLoop; }
119 void mark_strip_mined() { _loop_flags |= StripMined; }
120 void clear_strip_mined() { _loop_flags &= ~StripMined; } // StripMined is the only flag ever cleared
121 void mark_profile_trip_failed() { _loop_flags |= ProfileTripFailed; }
122 void mark_subword_loop() { _loop_flags |= SubwordLoop; }
123 void mark_loop_nest_inner_loop() { _loop_flags |= LoopNestInnerLoop; }
124 void mark_loop_nest_outer_loop() { _loop_flags |= LoopNestLongOuterLoop; }
125
126 int unswitch_max() { return _unswitch_max; }
127 int unswitch_count() { return _unswitch_count; }
128
129 void set_unswitch_count(int val) { // val is narrowed into the char field _unswitch_count
130 assert (val <= unswitch_max(), "too many unswitches"); // upper bound only; negative values are not rejected
131 _unswitch_count = val;
132 }
133
134 void set_profile_trip_cnt(float ptc) { _profile_trip_cnt = ptc; }
135 float profile_trip_cnt() { return _profile_trip_cnt; }
136
137 #ifndef PRODUCT
138 uint _stress_peeling_attempts = 0; // debug/diagnostic-only counter, excluded from product builds
139 #endif
140
141 LoopNode(Node *entry, Node *backedge)
142 : RegionNode(3), _loop_flags(0), _unswitch_count(0),
143 _profile_trip_cnt(COUNT_UNKNOWN) {
144 init_class_id(Class_Loop);
722 // Check for Node being a loop-breaking test
723 Node *is_loop_exit(Node *iff) const;
724
725 // Return unique loop-exit projection or null if the loop has multiple exits.
726 IfFalseNode* unique_loop_exit_proj_or_null();
727
728 // Remove simplistic dead code from loop body
729 void DCE_loop_body();
730
731 // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
732 // Replace with a 1-in-10 exit guess.
733 void adjust_loop_exit_prob( PhaseIdealLoop *phase );
734
735 // Return TRUE or FALSE if the loop should never be RCE'd or aligned.
736 // Useful for unrolling loops with NO array accesses.
737 bool policy_peel_only( PhaseIdealLoop *phase ) const;
738
739 // Return TRUE or FALSE if the loop should be unswitched -- clone
740 // loop with an invariant test
741 bool policy_unswitching( PhaseIdealLoop *phase ) const;
742
743 // Micro-benchmark spamming. Remove empty loops.
744 bool do_remove_empty_loop( PhaseIdealLoop *phase );
745
746 // Convert one-iteration loop into normal code.
747 bool do_one_iteration_loop( PhaseIdealLoop *phase );
748
749 // Return TRUE or FALSE if the loop should be peeled or not. Peel if we can
750 // move some loop-invariant test (usually a null-check) before the loop.
751 bool policy_peeling(PhaseIdealLoop *phase);
752
753 uint estimate_peeling(PhaseIdealLoop *phase);
754
755 // Return TRUE or FALSE if the loop should be maximally unrolled. Stash any
756 // known trip count in the counted loop node.
757 bool policy_maximally_unroll(PhaseIdealLoop *phase) const;
758
759 // Return TRUE or FALSE if the loop should be unrolled or not. Apply unroll
760 // if the loop is a counted loop and the loop body is small enough.
761 bool policy_unroll(PhaseIdealLoop *phase);
1647
1648 public:
1649 // Change the control input of expensive nodes to allow commoning by
1650 // IGVN when it is guaranteed to not result in a more frequent
1651 // execution of the expensive node. Return true if progress.
1652 bool process_expensive_nodes();
1653
1654 // Check whether node has become unreachable
1655 bool is_node_unreachable(Node *n) const {
1656 return !has_node(n) || n->is_unreachable(_igvn); // unknown to this pass, or dead per IGVN's reachability query
1657 }
1658
1659 // Eliminate range-checks and other trip-counter vs loop-invariant tests.
1660 void do_range_check(IdealLoopTree* loop);
1661
1662 // Clone loop with an invariant test (that does not exit) and
1663 // insert a clone of the test that selects which version to
1664 // execute.
1665 void do_unswitching(IdealLoopTree* loop, Node_List& old_new);
1666
1667 IfNode* find_unswitch_candidate(const IdealLoopTree* loop) const;
1668
1669 private:
1670 static bool has_control_dependencies_from_predicates(LoopNode* head);
1671 static void revert_to_normal_loop(const LoopNode* loop_head);
1672
1673 void hoist_invariant_check_casts(const IdealLoopTree* loop, const Node_List& old_new,
1674 const UnswitchedLoopSelector& unswitched_loop_selector);
1675 void add_unswitched_loop_version_bodies_to_igvn(IdealLoopTree* loop, const Node_List& old_new);
1676 static void increment_unswitch_counts(LoopNode* original_head, LoopNode* new_head);
1677 void remove_unswitch_candidate_from_loops(const Node_List& old_new, const UnswitchedLoopSelector& unswitched_loop_selector);
1678 #ifndef PRODUCT
1679 static void trace_loop_unswitching_count(IdealLoopTree* loop, LoopNode* original_head);
1680 static void trace_loop_unswitching_impossible(const LoopNode* original_head);
1681 static void trace_loop_unswitching_result(const UnswitchedLoopSelector& unswitched_loop_selector,
1682 const LoopNode* original_head, const LoopNode* new_head);
1683 static void trace_loop_multiversioning_result(const LoopSelector& loop_selector,
1684 const LoopNode* original_head, const LoopNode* new_head);
1685 #endif
1686
1687 public:
1688
1689 // Range Check Elimination uses this function!
1690 // Constrain the main loop iterations so the affine function:
1691 // low_limit <= scale_con * I + offset < upper_limit
1692 // always holds true. That is, either increase the number of iterations in
1693 // the pre-loop or the post-loop until the condition holds true in the main
1694 // loop. Scale_con, offset and limit are all loop invariant.
1695 void add_constraint(jlong stride_con, jlong scale_con, Node* offset, Node* low_limit, Node* upper_limit, Node* pre_ctrl, Node** pre_limit, Node** main_limit);
1696 // Helper function for add_constraint().
1697 Node* adjust_limit(bool reduce, Node* scale, Node* offset, Node* rc_limit, Node* old_limit, Node* pre_ctrl, bool round);
1698
1699 // Partially peel loop up through last_peel node.
1700 bool partial_peel( IdealLoopTree *loop, Node_List &old_new );
1701 bool duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old_new);
1853 void split_thru_phi_yank_old_nodes(Node* n, Node* region);
1854
1855 public:
1856
1857 // Conversion of fill/copy patterns into intrinsic versions
1858 bool do_intrinsify_fill();
1859 bool intrinsify_fill(IdealLoopTree* lpt);
1860 bool match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
1861 Node*& shift, Node*& offset);
1862
1863 private:
1864 // Helper functions
1865 Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache );
1866 Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true );
1867 void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true );
1868 bool split_up( Node *n, Node *blk1, Node *blk2 );
1869
1870 Node* place_outside_loop(Node* useblock, IdealLoopTree* loop) const;
1871 Node* try_move_store_before_loop(Node* n, Node *n_ctrl);
1872 void try_move_store_after_loop(Node* n);
1873 bool identical_backtoback_ifs(Node *n);
1874 bool can_split_if(Node *n_ctrl);
1875 bool cannot_split_division(const Node* n, const Node* region) const;
1876 static bool is_divisor_loop_phi(const Node* divisor, const Node* loop);
1877 bool loop_phi_backedge_type_contains_zero(const Node* phi_divisor, const Type* zero) const;
1878
1879 // Determine if a method is too big for a/another round of split-if, based on
1880 // a magic (approximate) ratio derived from the equally magic constant 35000,
1881 // previously used for this purpose (but without relating to the node limit).
1882 bool must_throttle_split_if() {
1883 uint threshold = C->max_node_limit() * 2 / 5; // throttle once live nodes exceed 40% of the node budget
1884 return C->live_nodes() > threshold;
1885 }
1886
1887 // A simplistic node request tracking mechanism, where
1888 // = UINT_MAX Request not valid or made final.
1889 // < UINT_MAX Nodes currently requested (estimate).
1890 uint _nodes_required;
1891
1892 enum { REQUIRE_MIN = 70 }; // presumably a floor for per-request node estimates -- confirm at _nodes_required use sites
2044 uint new_counter, Node_List& old_new, Node_List& worklist, Node_List*& split_if_set,
2045 Node_List*& split_bool_set, Node_List*& split_cex_set);
2046
2047 void finish_clone_loop(Node_List* split_if_set, Node_List* split_bool_set, Node_List* split_cex_set);
2048
2049 bool at_relevant_ctrl(Node* n, const Node* blk1, const Node* blk2);
2050
2051 bool clone_cmp_loadklass_down(Node* n, const Node* blk1, const Node* blk2);
2052 void clone_loadklass_nodes_at_cmp_index(const Node* n, Node* cmp, int i);
2053 bool clone_cmp_down(Node* n, const Node* blk1, const Node* blk2);
2054 void clone_template_assertion_expression_down(Node* node);
2055
2056 Node* similar_subtype_check(const Node* x, Node* r_in);
2057
2058 void update_addp_chain_base(Node* x, Node* old_base, Node* new_base);
2059
2060 bool can_move_to_inner_loop(Node* n, LoopNode* n_loop, Node* x);
2061
2062 void pin_nodes_dependent_on(Node* ctrl, bool old_iff_is_rangecheck);
2063
2064 Node* ensure_node_and_inputs_are_above_pre_end(CountedLoopEndNode* pre_end, Node* node);
2065
2066 Node* new_assertion_predicate_opaque_init(Node* entry_control, Node* init, Node* int_zero);
2067
2068 bool try_make_short_running_loop(IdealLoopTree* loop, jint stride_con, const Node_List& range_checks, const uint iters_limit);
2069
2070 ConINode* intcon(jint i);
2071
2072 ConLNode* longcon(jlong i);
2073
2074 ConNode* makecon(const Type* t);
2075
2076 ConNode* integercon(jlong l, BasicType bt);
2077
2078 ConNode* zerocon(BasicType bt);
2079 };
2080
2081 class CountedLoopConverter {
2082 friend class PhaseIdealLoop;
2083
|
26 #define SHARE_OPTO_LOOPNODE_HPP
27
28 #include "opto/cfgnode.hpp"
29 #include "opto/multnode.hpp"
30 #include "opto/phaseX.hpp"
31 #include "opto/predicates.hpp"
32 #include "opto/subnode.hpp"
33 #include "opto/type.hpp"
34 #include "utilities/checkedCast.hpp"
35
36 class CmpNode;
37 class BaseCountedLoopEndNode;
38 class CountedLoopNode;
39 class IdealLoopTree;
40 class LoopNode;
41 class Node;
42 class OuterStripMinedLoopEndNode;
43 class PredicateBlock;
44 class PathFrequency;
45 class PhaseIdealLoop;
46 class UnswitchCandidate;
47 class LoopSelector;
48 class ReachabilityFenceNode;
49 class UnswitchedLoopSelector;
50 class VectorSet;
51 class VSharedData;
52 class Invariance;
53 struct small_cache;
54
55 //
56 // I D E A L I Z E D L O O P S
57 //
58 // Idealized loops are the set of loops I perform more interesting
59 // transformations on, beyond simple hoisting.
60
61 //------------------------------LoopNode---------------------------------------
62 // Simple loop header. Fall in path on left, loop-back path on right.
63 class LoopNode : public RegionNode {
64 // Size is bigger to hold the flags. However, the flags do not change
65 // the semantics so it does not appear in the hash & cmp functions.
66 virtual uint size_of() const { return sizeof(*this); }
70 enum { Normal=0, Pre=1, Main=2, Post=3, PreMainPostFlagsMask=3, // bits 0-1 encode the pre/main/post role of a counted loop
71 MainHasNoPreLoop = 1<<2,
72 HasExactTripCount = 1<<3,
73 InnerLoop = 1<<4,
74 PartialPeelLoop = 1<<5,
75 PartialPeelFailed = 1<<6, // sticky: only set (mark_partial_peel_failed), never cleared
76 WasSlpAnalyzed = 1<<7,
77 PassedSlpAnalysis = 1<<8,
78 DoUnrollOnly = 1<<9,
79 VectorizedLoop = 1<<10,
80 HasAtomicPostLoop = 1<<11,
81 StripMined = 1<<12, // only flag with a clear_ accessor; can be reverted (clear_strip_mined)
82 SubwordLoop = 1<<13,
83 ProfileTripFailed = 1<<14,
84 LoopNestInnerLoop = 1<<15,
85 LoopNestLongOuterLoop = 1<<16,
86 MultiversionFastLoop = 1<<17, // Multiversion* values share a 2-bit field at bits 17-18;
87 MultiversionSlowLoop = 2<<17, // they are field values, NOT independent one-hot flags --
88 MultiversionDelayedSlowLoop = 3<<17, // extract with MultiversionFlagsMask before comparing
89 MultiversionFlagsMask = 3<<17,
90 FlatArrays = 1<<19}; // FIX: was 1<<18, which aliases bit 18 of the Multiversion field (2<<17 == 1<<18): a MultiversionSlowLoop would falsely report is_flat_arrays(), and mark_flat_arrays() would corrupt the multiversion state; bit 19 is the first free bit
91 char _unswitch_count; // times this loop has been unswitched; capped by set_unswitch_count()
92 enum { _unswitch_max=3 };
93
94 // Expected trip count from profile data
95 float _profile_trip_cnt;
96
97 public:
98 // Names for edge indices
99 enum { Self=0, EntryControl, LoopBackControl }; // EntryControl=1 (fall-in), LoopBackControl=2 (backedge)
100
101 bool is_inner_loop() const { return _loop_flags & InnerLoop; } // flag accessors: each tests/sets one _loop_flags bit
102 void set_inner_loop() { _loop_flags |= InnerLoop; }
103
104 bool is_vectorized_loop() const { return _loop_flags & VectorizedLoop; }
105 bool is_partial_peel_loop() const { return _loop_flags & PartialPeelLoop; }
106 void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; }
107 bool partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; }
108 bool is_strip_mined() const { return _loop_flags & StripMined; }
109 bool is_profile_trip_failed() const { return _loop_flags & ProfileTripFailed; }
110 bool is_subword_loop() const { return _loop_flags & SubwordLoop; }
111 bool is_loop_nest_inner_loop() const { return _loop_flags & LoopNestInnerLoop; }
112 bool is_loop_nest_outer_loop() const { return _loop_flags & LoopNestLongOuterLoop; }
113 bool is_flat_arrays() const { return _loop_flags & FlatArrays; } // flat-array flag; set by mark_flat_arrays(), consumed by the flat-array unswitching support (see find_unswitch_candidates)
114
115 void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; }
116 void mark_was_slp() { _loop_flags |= WasSlpAnalyzed; }
117 void mark_passed_slp() { _loop_flags |= PassedSlpAnalysis; }
118 void mark_do_unroll_only() { _loop_flags |= DoUnrollOnly; }
119 void mark_loop_vectorized() { _loop_flags |= VectorizedLoop; }
120 void mark_has_atomic_post_loop() { _loop_flags |= HasAtomicPostLoop; }
121 void mark_strip_mined() { _loop_flags |= StripMined; }
122 void clear_strip_mined() { _loop_flags &= ~StripMined; } // StripMined is the only flag ever cleared
123 void mark_profile_trip_failed() { _loop_flags |= ProfileTripFailed; }
124 void mark_subword_loop() { _loop_flags |= SubwordLoop; }
125 void mark_loop_nest_inner_loop() { _loop_flags |= LoopNestInnerLoop; }
126 void mark_loop_nest_outer_loop() { _loop_flags |= LoopNestLongOuterLoop; }
127 void mark_flat_arrays() { _loop_flags |= FlatArrays; }
128
129 int unswitch_max() { return _unswitch_max; }
130 int unswitch_count() { return _unswitch_count; }
131
132 void set_unswitch_count(int val) { // val is narrowed into the char field _unswitch_count
133 assert (val <= unswitch_max(), "too many unswitches"); // upper bound only; negative values are not rejected
134 _unswitch_count = val;
135 }
136
137 void set_profile_trip_cnt(float ptc) { _profile_trip_cnt = ptc; }
138 float profile_trip_cnt() { return _profile_trip_cnt; }
139
140 #ifndef PRODUCT
141 uint _stress_peeling_attempts = 0; // debug/diagnostic-only counter, excluded from product builds
142 #endif
143
144 LoopNode(Node *entry, Node *backedge)
145 : RegionNode(3), _loop_flags(0), _unswitch_count(0),
146 _profile_trip_cnt(COUNT_UNKNOWN) {
147 init_class_id(Class_Loop);
725 // Check for Node being a loop-breaking test
726 Node *is_loop_exit(Node *iff) const;
727
728 // Return unique loop-exit projection or null if the loop has multiple exits.
729 IfFalseNode* unique_loop_exit_proj_or_null();
730
731 // Remove simplistic dead code from loop body
732 void DCE_loop_body();
733
734 // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
735 // Replace with a 1-in-10 exit guess.
736 void adjust_loop_exit_prob( PhaseIdealLoop *phase );
737
738 // Return TRUE or FALSE if the loop should never be RCE'd or aligned.
739 // Useful for unrolling loops with NO array accesses.
740 bool policy_peel_only( PhaseIdealLoop *phase ) const;
741
742 // Return TRUE or FALSE if the loop should be unswitched -- clone
743 // loop with an invariant test
744 bool policy_unswitching( PhaseIdealLoop *phase ) const;
745 bool no_unswitch_candidate() const;
746
747 // Micro-benchmark spamming. Remove empty loops.
748 bool do_remove_empty_loop( PhaseIdealLoop *phase );
749
750 // Convert one-iteration loop into normal code.
751 bool do_one_iteration_loop( PhaseIdealLoop *phase );
752
753 // Return TRUE or FALSE if the loop should be peeled or not. Peel if we can
754 // move some loop-invariant test (usually a null-check) before the loop.
755 bool policy_peeling(PhaseIdealLoop *phase);
756
757 uint estimate_peeling(PhaseIdealLoop *phase);
758
759 // Return TRUE or FALSE if the loop should be maximally unrolled. Stash any
760 // known trip count in the counted loop node.
761 bool policy_maximally_unroll(PhaseIdealLoop *phase) const;
762
763 // Return TRUE or FALSE if the loop should be unrolled or not. Apply unroll
764 // if the loop is a counted loop and the loop body is small enough.
765 bool policy_unroll(PhaseIdealLoop *phase);
1651
1652 public:
1653 // Change the control input of expensive nodes to allow commoning by
1654 // IGVN when it is guaranteed to not result in a more frequent
1655 // execution of the expensive node. Return true if progress.
1656 bool process_expensive_nodes();
1657
1658 // Check whether node has become unreachable
1659 bool is_node_unreachable(Node *n) const {
1660 return !has_node(n) || n->is_unreachable(_igvn); // unknown to this pass, or dead per IGVN's reachability query
1661 }
1662
1663 // Eliminate range-checks and other trip-counter vs loop-invariant tests.
1664 void do_range_check(IdealLoopTree* loop);
1665
1666 // Clone loop with an invariant test (that does not exit) and
1667 // insert a clone of the test that selects which version to
1668 // execute.
1669 void do_unswitching(IdealLoopTree* loop, Node_List& old_new);
1670
1671 IfNode* find_unswitch_candidates(const IdealLoopTree* loop, Node_List& flat_array_checks) const;
1672 IfNode* find_unswitch_candidate_from_idoms(const IdealLoopTree* loop) const;
1673
1674 private:
1675 static bool has_control_dependencies_from_predicates(LoopNode* head);
1676 static void revert_to_normal_loop(const LoopNode* loop_head);
1677
1678 void hoist_invariant_check_casts(const IdealLoopTree* loop, const Node_List& old_new,
1679 const UnswitchCandidate& unswitch_candidate, const IfNode* loop_selector);
1680 void add_unswitched_loop_version_bodies_to_igvn(IdealLoopTree* loop, const Node_List& old_new);
1681 static void increment_unswitch_counts(LoopNode* original_head, LoopNode* new_head);
1682 void remove_unswitch_candidate_from_loops(const Node_List& old_new, const UnswitchedLoopSelector& unswitched_loop_selector);
1683 #ifndef PRODUCT
1684 static void trace_loop_unswitching_count(IdealLoopTree* loop, LoopNode* original_head);
1685 static void trace_loop_unswitching_impossible(const LoopNode* original_head);
1686 static void trace_loop_unswitching_result(const UnswitchedLoopSelector& unswitched_loop_selector,
1687 const UnswitchCandidate& unswitch_candidate,
1688 const LoopNode* original_head, const LoopNode* new_head);
1689 static void trace_loop_multiversioning_result(const LoopSelector& loop_selector,
1690 const LoopNode* original_head, const LoopNode* new_head);
1691 #endif
1692
1693 public:
1694
1695 // Range Check Elimination uses this function!
1696 // Constrain the main loop iterations so the affine function:
1697 // low_limit <= scale_con * I + offset < upper_limit
1698 // always holds true. That is, either increase the number of iterations in
1699 // the pre-loop or the post-loop until the condition holds true in the main
1700 // loop. Scale_con, offset and limit are all loop invariant.
1701 void add_constraint(jlong stride_con, jlong scale_con, Node* offset, Node* low_limit, Node* upper_limit, Node* pre_ctrl, Node** pre_limit, Node** main_limit);
1702 // Helper function for add_constraint().
1703 Node* adjust_limit(bool reduce, Node* scale, Node* offset, Node* rc_limit, Node* old_limit, Node* pre_ctrl, bool round);
1704
1705 // Partially peel loop up through last_peel node.
1706 bool partial_peel( IdealLoopTree *loop, Node_List &old_new );
1707 bool duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old_new);
1859 void split_thru_phi_yank_old_nodes(Node* n, Node* region);
1860
1861 public:
1862
1863 // Conversion of fill/copy patterns into intrinsic versions
1864 bool do_intrinsify_fill();
1865 bool intrinsify_fill(IdealLoopTree* lpt);
1866 bool match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
1867 Node*& shift, Node*& offset);
1868
1869 private:
1870 // Helper functions
1871 Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache );
1872 Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true );
1873 void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true );
1874 bool split_up( Node *n, Node *blk1, Node *blk2 );
1875
1876 Node* place_outside_loop(Node* useblock, IdealLoopTree* loop) const;
1877 Node* try_move_store_before_loop(Node* n, Node *n_ctrl);
1878 void try_move_store_after_loop(Node* n);
1879 void move_flat_array_check_out_of_loop(Node* n);
1880 bool identical_backtoback_ifs(Node *n);
1881 bool flat_array_element_type_check(Node *n);
1882 bool can_split_if(Node *n_ctrl);
1883 bool cannot_split_division(const Node* n, const Node* region) const;
1884 static bool is_divisor_loop_phi(const Node* divisor, const Node* loop);
1885 bool loop_phi_backedge_type_contains_zero(const Node* phi_divisor, const Type* zero) const;
1886
1887 // Determine if a method is too big for a/another round of split-if, based on
1888 // a magic (approximate) ratio derived from the equally magic constant 35000,
1889 // previously used for this purpose (but without relating to the node limit).
1890 bool must_throttle_split_if() {
1891 uint threshold = C->max_node_limit() * 2 / 5; // throttle once live nodes exceed 40% of the node budget
1892 return C->live_nodes() > threshold;
1893 }
1894
1895 // A simplistic node request tracking mechanism, where
1896 // = UINT_MAX Request not valid or made final.
1897 // < UINT_MAX Nodes currently requested (estimate).
1898 uint _nodes_required;
1899
1900 enum { REQUIRE_MIN = 70 }; // presumably a floor for per-request node estimates -- confirm at _nodes_required use sites
1901
2052 uint new_counter, Node_List& old_new, Node_List& worklist, Node_List*& split_if_set,
2053 Node_List*& split_bool_set, Node_List*& split_cex_set);
2054
2055 void finish_clone_loop(Node_List* split_if_set, Node_List* split_bool_set, Node_List* split_cex_set);
2056
2057 bool at_relevant_ctrl(Node* n, const Node* blk1, const Node* blk2);
2058
2059 bool clone_cmp_loadklass_down(Node* n, const Node* blk1, const Node* blk2);
2060 void clone_loadklass_nodes_at_cmp_index(const Node* n, Node* cmp, int i);
2061 bool clone_cmp_down(Node* n, const Node* blk1, const Node* blk2);
2062 void clone_template_assertion_expression_down(Node* node);
2063
2064 Node* similar_subtype_check(const Node* x, Node* r_in);
2065
2066 void update_addp_chain_base(Node* x, Node* old_base, Node* new_base);
2067
2068 bool can_move_to_inner_loop(Node* n, LoopNode* n_loop, Node* x);
2069
2070 void pin_nodes_dependent_on(Node* ctrl, bool old_iff_is_rangecheck);
2071
2072 void collect_flat_array_checks(const IdealLoopTree* loop, Node_List& flat_array_checks) const;
2073
2074 Node* ensure_node_and_inputs_are_above_pre_end(CountedLoopEndNode* pre_end, Node* node);
2075
2076 Node* new_assertion_predicate_opaque_init(Node* entry_control, Node* init, Node* int_zero);
2077
2078 bool try_make_short_running_loop(IdealLoopTree* loop, jint stride_con, const Node_List& range_checks, const uint iters_limit);
2079
2080 ConINode* intcon(jint i);
2081
2082 ConLNode* longcon(jlong i);
2083
2084 ConNode* makecon(const Type* t);
2085
2086 ConNode* integercon(jlong l, BasicType bt);
2087
2088 ConNode* zerocon(BasicType bt);
2089 };
2090
2091 class CountedLoopConverter {
2092 friend class PhaseIdealLoop;
2093
|