/*
 * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_LOOPNODE_HPP
#define SHARE_OPTO_LOOPNODE_HPP

#include "opto/cfgnode.hpp"
#include "opto/multnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/predicates.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "utilities/checkedCast.hpp"

class CmpNode;
class BaseCountedLoopEndNode;
class CountedLoopNode;
class IdealLoopTree;
class LoopNode;
class Node;
class OuterStripMinedLoopEndNode;
class PredicateBlock;
class PathFrequency;
class PhaseIdealLoop;
class LoopSelector;
class ReachabilityFenceNode;
class UnswitchedLoopSelector;
class VectorSet;
class VSharedData;
class Invariance;
struct small_cache;

//
//                  I D E A L I Z E D   L O O P S
//
// Idealized loops are the set of loops I perform more interesting
// transformations on, beyond simple hoisting.

//------------------------------LoopNode---------------------------------------
// Simple loop header. Fall in path on left, loop-back path on right.
class LoopNode : public RegionNode {
  // Size is bigger to hold the flags. However, the flags do not change
  // the semantics so it does not appear in the hash & cmp functions.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  uint _loop_flags;
  // Names for flag bitfields
  enum { Normal=0, Pre=1, Main=2, Post=3, PreMainPostFlagsMask=3,
         MainHasNoPreLoop            = 1<<2,
         HasExactTripCount           = 1<<3,
         InnerLoop                   = 1<<4,
         PartialPeelLoop             = 1<<5,
         PartialPeelFailed           = 1<<6,
         WasSlpAnalyzed              = 1<<7,
         PassedSlpAnalysis           = 1<<8,
         DoUnrollOnly                = 1<<9,
         VectorizedLoop              = 1<<10,
         HasAtomicPostLoop           = 1<<11,
         StripMined                  = 1<<12,
         SubwordLoop                 = 1<<13,
         ProfileTripFailed           = 1<<14,
         LoopNestInnerLoop           = 1<<15,
         LoopNestLongOuterLoop       = 1<<16,
         MultiversionFastLoop        = 1<<17,
         MultiversionSlowLoop        = 2<<17,
         MultiversionDelayedSlowLoop = 3<<17,
         MultiversionFlagsMask       = 3<<17,
  };
  char _unswitch_count;
  enum { _unswitch_max=3 };

  // Expected trip count from profile data
  float _profile_trip_cnt;

public:
  // Names for edge indices
  enum { Self=0, EntryControl, LoopBackControl };

  bool is_inner_loop() const { return _loop_flags & InnerLoop; }
  void set_inner_loop() { _loop_flags |= InnerLoop; }

  bool is_vectorized_loop() const { return _loop_flags & VectorizedLoop; }
  bool is_partial_peel_loop() const { return _loop_flags & PartialPeelLoop; }
  void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; }
  bool partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; }
  bool is_strip_mined() const { return _loop_flags & StripMined; }
  bool is_profile_trip_failed() const { return _loop_flags & ProfileTripFailed; }
  bool is_subword_loop() const { return _loop_flags & SubwordLoop; }
  bool is_loop_nest_inner_loop() const { return _loop_flags & LoopNestInnerLoop; }
  bool is_loop_nest_outer_loop() const { return _loop_flags & LoopNestLongOuterLoop; }

  void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; }
  void mark_was_slp() { _loop_flags |= WasSlpAnalyzed; }
  void mark_passed_slp() { _loop_flags |= PassedSlpAnalysis; }
  void mark_do_unroll_only() { _loop_flags |= DoUnrollOnly; }
  void mark_loop_vectorized() { _loop_flags |= VectorizedLoop; }
  void mark_has_atomic_post_loop() { _loop_flags |= HasAtomicPostLoop; }
  void mark_strip_mined() { _loop_flags |= StripMined; }
  void clear_strip_mined() { _loop_flags &= ~StripMined; }
  void mark_profile_trip_failed() { _loop_flags |= ProfileTripFailed; }
  void mark_subword_loop() { _loop_flags |= SubwordLoop; }
  void mark_loop_nest_inner_loop() { _loop_flags |= LoopNestInnerLoop; }
  void mark_loop_nest_outer_loop() { _loop_flags |= LoopNestLongOuterLoop; }

  int unswitch_max() { return _unswitch_max; }
  int unswitch_count() { return _unswitch_count; }

  void set_unswitch_count(int val) {
    assert(val <= unswitch_max(), "too many unswitches");
    _unswitch_count = val;
  }

  void set_profile_trip_cnt(float ptc) { _profile_trip_cnt = ptc; }
  float profile_trip_cnt() { return _profile_trip_cnt; }

#ifndef PRODUCT
  uint _stress_peeling_attempts = 0;
#endif

  LoopNode(Node *entry, Node *backedge)
    : RegionNode(3), _loop_flags(0), _unswitch_count(0),
      _profile_trip_cnt(COUNT_UNKNOWN) {
    init_class_id(Class_Loop);
    init_req(EntryControl, entry);
    init_req(LoopBackControl, backedge);
  }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int Opcode() const;
  bool can_be_counted_loop(PhaseValues* phase) const {
    return req() == 3 && in(0) != nullptr &&
      in(1) != nullptr && phase->type(in(1)) != Type::TOP &&
      in(2) != nullptr && phase->type(in(2)) != Type::TOP;
  }
  bool is_valid_counted_loop(BasicType bt) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif

  void verify_strip_mined(int expect_skeleton) const NOT_DEBUG_RETURN;
  virtual LoopNode* skip_strip_mined(int expect_skeleton = 1) { return this; }
  virtual IfTrueNode* outer_loop_tail() const { ShouldNotReachHere(); return nullptr; }
  virtual OuterStripMinedLoopEndNode* outer_loop_end() const { ShouldNotReachHere(); return nullptr; }
  virtual IfFalseNode* outer_loop_exit() const { ShouldNotReachHere(); return nullptr; }
  virtual SafePointNode* outer_safepoint() const { ShouldNotReachHere(); return nullptr; }
};

//------------------------------Counted Loops----------------------------------
// Counted loops are all trip-counted loops, with exactly 1 trip-counter exit
// path (and maybe some other exit paths). The trip-counter exit is always
// last in the loop. The trip-counter has to stride by a constant;
// the exit value is also loop invariant.

// CountedLoopNodes and CountedLoopEndNodes come in matched pairs. The
// CountedLoopNode has the incoming loop control and the loop-back-control
// which is always the IfTrue before the matching CountedLoopEndNode. The
// CountedLoopEndNode has an incoming control (possibly not the
// CountedLoopNode if there is control flow in the loop), the post-increment
// trip-counter value, and the limit. The trip-counter value is always of
// the form (Op old-trip-counter stride). The old-trip-counter is produced
// by a Phi connected to the CountedLoopNode. The stride is constant.
// The Op is any commutable opcode, including Add, Mul, Xor. The
// CountedLoopEndNode also takes in the loop-invariant limit value.

// From a CountedLoopNode I can reach the matching CountedLoopEndNode via the
// loop-back control. From CountedLoopEndNodes I can reach CountedLoopNodes
// via the old-trip-counter from the Op node.
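
// For example, a canonical int counted loop such as (a sketch, assuming the
// common Add form of the Op; not tied to any particular bytecode shape):
//
//   for (int i = init; i < limit; i += stride) { body; }
//
// roughly corresponds to the node pattern described above:
//
//        CountedLoop <-----------------+
//            |                         |
//       Phi(init, incr)                |
//            |                         |
//       incr = AddI(Phi, stride)       |
//            |                         |
//       CmpI(incr, limit)              |
//            |                         |
//       Bool(lt)                       |
//            |                         |
//       CountedLoopEnd                 |
//        /         \                   |
//    IfFalse      IfTrue --------------+
//    (exit)       (loop-back control)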

//------------------------------CountedLoopNode--------------------------------
// CountedLoopNodes head simple counted loops. CountedLoopNodes have as
// inputs the incoming loop-start control and the loop-back control, so they
// act like RegionNodes. They also take in the initial trip counter, the
// loop-invariant stride and the loop-invariant limit value. CountedLoopNodes
// produce a loop-body control and the trip counter value. Since
// CountedLoopNodes behave like RegionNodes I still have a standard CFG model.

class BaseCountedLoopNode : public LoopNode {
public:
  BaseCountedLoopNode(Node *entry, Node *backedge)
    : LoopNode(entry, backedge) {
  }

  Node *init_control() const { return in(EntryControl); }
  Node *back_control() const { return in(LoopBackControl); }

  Node* init_trip() const;
  Node* stride() const;
  bool stride_is_con() const;
  Node* limit() const;
  Node* incr() const;
  Node* phi() const;

  BaseCountedLoopEndNode* loopexit_or_null() const;
  BaseCountedLoopEndNode* loopexit() const;

  virtual BasicType bt() const = 0;

  jlong stride_con() const;

  static BaseCountedLoopNode* make(Node* entry, Node* backedge, BasicType bt);

  virtual void set_trip_count(julong tc) = 0;
  virtual julong trip_count() const = 0;

  bool has_exact_trip_count() const { return (_loop_flags & HasExactTripCount) != 0; }
  void set_exact_trip_count(julong tc) {
    set_trip_count(tc);
    _loop_flags |= HasExactTripCount;
  }
  void set_nonexact_trip_count() {
    _loop_flags &= ~HasExactTripCount;
  }
};


class CountedLoopNode : public BaseCountedLoopNode {
  // Size is bigger to hold _main_idx. However, _main_idx does not change
  // the semantics so it does not appear in the hash & cmp functions.
  virtual uint size_of() const { return sizeof(*this); }

  // For Pre- and Post-loops during debugging ONLY, this holds the index of
  // the Main CountedLoop. Used to assert that we understand the graph shape.
  node_idx_t _main_idx;

  // Known trip count calculated by compute_exact_trip_count()
  uint _trip_count;

  // Log2 of original loop bodies in unrolled loop
  int _unrolled_count_log2;

  // Node count prior to last unrolling - used to decide if
  // unroll,optimize,unroll,optimize,... is making progress
  int _node_count_before_unroll;

  // If slp analysis is performed we record the maximum
  // vector mapped unroll factor here
  int _slp_maximum_unroll_factor;

public:
  CountedLoopNode(Node *entry, Node *backedge)
    : BaseCountedLoopNode(entry, backedge), _main_idx(0), _trip_count(max_juint),
      _unrolled_count_log2(0), _node_count_before_unroll(0),
      _slp_maximum_unroll_factor(0) {
    init_class_id(Class_CountedLoop);
    // Initialize _trip_count to the largest possible value.
    // Will be reset (lower) if the loop's trip count is known.
  }

  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  CountedLoopEndNode* loopexit_or_null() const { return (CountedLoopEndNode*) BaseCountedLoopNode::loopexit_or_null(); }
  CountedLoopEndNode* loopexit() const { return (CountedLoopEndNode*) BaseCountedLoopNode::loopexit(); }
  int stride_con() const;

  // A 'main' loop has a pre-loop and a post-loop. The 'main' loop
  // can run short a few iterations and may start a few iterations in.
  // It will be RCE'd and unrolled and aligned.

  // A following 'post' loop will run any remaining iterations. Used
  // during Range Check Elimination, the 'post' loop will do any final
  // iterations with full checks. Also used by Loop Unrolling, where
  // the 'post' loop will do any epilog iterations needed. Basically,
  // a 'post' loop can not profitably be further unrolled or RCE'd.

  // A preceding 'pre' loop will run at least 1 iteration (to do peeling),
  // it may do under-flow checks for RCE and may do alignment iterations
  // so the following main loop 'knows' that it is striding down cache
  // lines.

  // A 'main' loop that is ONLY unrolled or peeled, never RCE'd or
  // Aligned, may be missing its pre-loop.
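
  // As a sketch (Java-like source, illustration only), iteration splitting
  // turns
  //
  //   for (int i = 0; i < n; i++) { a[i] = f(i); }
  //
  // into roughly
  //
  //   for (; i < pre_limit; i++)     { a[i] = f(i); }   // 'pre': peeled
  //                                                     // iterations, full checks
  //   for (; i < main_limit; i += U) { /* unrolled */ } // 'main': unrolled by U,
  //                                                     // range checks eliminated
  //   for (; i < n; i++)             { a[i] = f(i); }   // 'post': epilog
  //                                                     // iterations, full checks
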
  bool is_normal_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Normal; }
  bool is_pre_loop    () const { return (_loop_flags&PreMainPostFlagsMask) == Pre; }
  bool is_main_loop   () const { return (_loop_flags&PreMainPostFlagsMask) == Main; }
  bool is_post_loop   () const { return (_loop_flags&PreMainPostFlagsMask) == Post; }
  bool was_slp_analyzed () const { return (_loop_flags&WasSlpAnalyzed) == WasSlpAnalyzed; }
  bool has_passed_slp () const { return (_loop_flags&PassedSlpAnalysis) == PassedSlpAnalysis; }
  bool is_unroll_only () const { return (_loop_flags&DoUnrollOnly) == DoUnrollOnly; }
  bool is_main_no_pre_loop() const { return _loop_flags & MainHasNoPreLoop; }
  bool has_atomic_post_loop () const { return (_loop_flags & HasAtomicPostLoop) == HasAtomicPostLoop; }
  void set_main_no_pre_loop() { _loop_flags |= MainHasNoPreLoop; }

  IfNode* find_multiversion_if_from_multiversion_fast_main_loop();

  int main_idx() const { return _main_idx; }

  void set_trip_count(julong tc) {
    assert(tc < max_juint, "Cannot set trip count to max_juint");
    _trip_count = checked_cast<uint>(tc);
  }
  julong trip_count() const { return _trip_count; }

  void set_pre_loop  (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Pre ; _main_idx = main->_idx; }
  void set_main_loop (                     ) { assert(is_normal_loop(),""); _loop_flags |= Main; }
  void set_post_loop (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Post; _main_idx = main->_idx; }
  void set_normal_loop(                    ) { _loop_flags &= ~PreMainPostFlagsMask; }

  void set_notpassed_slp() {
    _loop_flags &= ~PassedSlpAnalysis;
  }

  void double_unrolled_count() { _unrolled_count_log2++; }
  int  unrolled_count()        { return 1 << MIN2(_unrolled_count_log2, BitsPerInt-3); }

  void set_node_count_before_unroll(int ct)  { _node_count_before_unroll = ct; }
  int  node_count_before_unroll()            { return _node_count_before_unroll; }
  void set_slp_max_unroll(int unroll_factor) { _slp_maximum_unroll_factor = unroll_factor; }
  int  slp_max_unroll() const                { return _slp_maximum_unroll_factor; }

  // Multiversioning allows us to duplicate a CountedLoop, and have two versions, and the multiversion_if
  // decides which one is taken:
  // (1) fast_loop: We enter this loop by default; initially, the multiversion_if has its condition set to
  //                "true", guarded by an OpaqueMultiversioning. If we want to make a speculative assumption
  //                for an optimization, we can add the runtime-check to the multiversion_if, and if the
  //                assumption fails we take the slow_loop instead, where we do not make the same speculative
  //                assumption.
  //                We call it the "fast_loop" because it has more optimizations, enabled by the speculative
  //                runtime-checks at the multiversion_if, and we expect the fast_loop to execute faster.
  // (2) slow_loop: By default, it is not taken, until a runtime-check is added to the multiversion_if while
  //                optimizing the fast_loop. If such a runtime-check is never added, then after loop-opts
  //                the multiversion_if constant folds to true, and the slow_loop is folded away. To save
  //                compile time, we delay the optimization of the slow_loop until a runtime-check is added
  //                to the multiversion_if, at which point we resume optimizations for the slow_loop.
  //                We call it the "slow_loop" because it has fewer optimizations, since this is the fall-back
  //                loop where we do not make any of the speculative assumptions we make for the fast_loop.
  //                Hence, we expect the slow_loop to execute slower.
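  //
  // Sketch of the resulting control flow (illustration only):
  //
  //                multiversion_if
  //      (OpaqueMultiversioning, initially "true")
  //           /                    \
  //       fast_loop             slow_loop
  //  (speculative assumptions,  (no speculative assumptions;
  //   more optimizations)        kept unoptimized until a
  //                              runtime-check is added)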
  bool is_multiversion() const { return (_loop_flags & MultiversionFlagsMask) != Normal; }
  bool is_multiversion_fast_loop() const { return (_loop_flags & MultiversionFlagsMask) == MultiversionFastLoop; }
  bool is_multiversion_slow_loop() const { return (_loop_flags & MultiversionFlagsMask) == MultiversionSlowLoop; }
  bool is_multiversion_delayed_slow_loop() const { return (_loop_flags & MultiversionFlagsMask) == MultiversionDelayedSlowLoop; }
  void set_multiversion_fast_loop() { assert(!is_multiversion(), ""); _loop_flags |= MultiversionFastLoop; }
  void set_multiversion_slow_loop() { assert(!is_multiversion(), ""); _loop_flags |= MultiversionSlowLoop; }
  void set_multiversion_delayed_slow_loop() { assert(!is_multiversion(), ""); _loop_flags |= MultiversionDelayedSlowLoop; }
  void set_no_multiversion() { assert( is_multiversion(), ""); _loop_flags &= ~MultiversionFlagsMask; }

  virtual LoopNode* skip_strip_mined(int expect_skeleton = 1);
  OuterStripMinedLoopNode* outer_loop() const;
  virtual IfTrueNode* outer_loop_tail() const;
  virtual OuterStripMinedLoopEndNode* outer_loop_end() const;
  virtual IfFalseNode* outer_loop_exit() const;
  virtual SafePointNode* outer_safepoint() const;

  Node* skip_assertion_predicates_with_halt();

  virtual BasicType bt() const {
    return T_INT;
  }

  Node* is_canonical_loop_entry();
  CountedLoopEndNode* find_pre_loop_end();

  Node* uncasted_init_trip(bool uncasted);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

class LongCountedLoopNode : public BaseCountedLoopNode {
private:
  virtual uint size_of() const { return sizeof(*this); }

  // Known trip count calculated by compute_exact_trip_count()
  julong _trip_count;

public:
  LongCountedLoopNode(Node *entry, Node *backedge)
    : BaseCountedLoopNode(entry, backedge), _trip_count(max_julong) {
    init_class_id(Class_LongCountedLoop);
  }

  virtual int Opcode() const;

  virtual BasicType bt() const {
    return T_LONG;
  }

  void set_trip_count(julong tc) {
    assert(tc < max_julong, "Cannot set trip count to max_julong");
    _trip_count = tc;
  }
  julong trip_count() const { return _trip_count; }

  LongCountedLoopEndNode* loopexit_or_null() const { return (LongCountedLoopEndNode*) BaseCountedLoopNode::loopexit_or_null(); }
  LongCountedLoopEndNode* loopexit() const { return (LongCountedLoopEndNode*) BaseCountedLoopNode::loopexit(); }
};


//------------------------------CountedLoopEndNode-----------------------------
// CountedLoopEndNodes end simple trip counted loops. They act much like
// IfNodes.

class BaseCountedLoopEndNode : public IfNode {
public:
  enum { TestControl, TestValue };
  BaseCountedLoopEndNode(Node *control, Node *test, float prob, float cnt)
    : IfNode(control, test, prob, cnt) {
    init_class_id(Class_BaseCountedLoopEnd);
  }

  Node *cmp_node() const { return (in(TestValue)->req() >= 2) ? in(TestValue)->in(1) : nullptr; }
  Node* incr() const { Node* tmp = cmp_node(); return (tmp && tmp->req() == 3) ? tmp->in(1) : nullptr; }
  Node* limit() const { Node* tmp = cmp_node(); return (tmp && tmp->req() == 3) ? tmp->in(2) : nullptr; }
  Node* stride() const { Node* tmp = incr(); return (tmp && tmp->req() == 3) ? tmp->in(2) : nullptr; }
  Node* init_trip() const { Node* tmp = phi(); return (tmp && tmp->req() == 3) ? tmp->in(1) : nullptr; }
  bool stride_is_con() const { Node *tmp = stride(); return (tmp != nullptr && tmp->is_Con()); }

  PhiNode* phi() const {
    Node* tmp = incr();
    if (tmp && tmp->req() == 3) {
      Node* phi = tmp->in(1);
      if (phi->is_Phi()) {
        return phi->as_Phi();
      }
    }
    return nullptr;
  }

  BaseCountedLoopNode* loopnode() const {
    // The CountedLoopNode that goes with this CountedLoopEndNode may
    // have been optimized out by the IGVN so be cautious with the
    // pattern matching on the graph
    PhiNode* iv_phi = phi();
    if (iv_phi == nullptr) {
      return nullptr;
    }
    Node* ln = iv_phi->in(0);
    if (!ln->is_BaseCountedLoop() || ln->as_BaseCountedLoop()->loopexit_or_null() != this) {
      return nullptr;
    }
    if (ln->as_BaseCountedLoop()->bt() != bt()) {
      return nullptr;
    }
    return ln->as_BaseCountedLoop();
  }

  BoolTest::mask test_trip() const { return in(TestValue)->as_Bool()->_test._test; }

  jlong stride_con() const;
  virtual BasicType bt() const = 0;

  static BaseCountedLoopEndNode* make(Node* control, Node* test, float prob, float cnt, BasicType bt);
};

class CountedLoopEndNode : public BaseCountedLoopEndNode {
public:

  CountedLoopEndNode(Node *control, Node *test, float prob, float cnt)
    : BaseCountedLoopEndNode(control, test, prob, cnt) {
    init_class_id(Class_CountedLoopEnd);
  }
  virtual int Opcode() const;

  CountedLoopNode* loopnode() const {
    return (CountedLoopNode*) BaseCountedLoopEndNode::loopnode();
  }

  virtual BasicType bt() const {
    return T_INT;
  }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

class LongCountedLoopEndNode : public BaseCountedLoopEndNode {
public:
  LongCountedLoopEndNode(Node *control, Node *test, float prob, float cnt)
    : BaseCountedLoopEndNode(control, test, prob, cnt) {
    init_class_id(Class_LongCountedLoopEnd);
  }

  LongCountedLoopNode* loopnode() const {
    return (LongCountedLoopNode*) BaseCountedLoopEndNode::loopnode();
  }

  virtual int Opcode() const;

  virtual BasicType bt() const {
    return T_LONG;
  }
};


inline BaseCountedLoopEndNode* BaseCountedLoopNode::loopexit_or_null() const {
  Node* bctrl = back_control();
  if (bctrl == nullptr) return nullptr;

  Node* lexit = bctrl->in(0);
  if (!lexit->is_BaseCountedLoopEnd()) {
    return nullptr;
  }
  BaseCountedLoopEndNode* result = lexit->as_BaseCountedLoopEnd();
  if (result->bt() != bt()) {
    return nullptr;
  }
  return result;
}

inline BaseCountedLoopEndNode* BaseCountedLoopNode::loopexit() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  assert(cle != nullptr, "loopexit is null");
  return cle;
}

inline Node* BaseCountedLoopNode::init_trip() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != nullptr ? cle->init_trip() : nullptr;
}
inline Node* BaseCountedLoopNode::stride() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != nullptr ? cle->stride() : nullptr;
}

inline bool BaseCountedLoopNode::stride_is_con() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != nullptr && cle->stride_is_con();
}
inline Node* BaseCountedLoopNode::limit() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != nullptr ? cle->limit() : nullptr;
}
inline Node* BaseCountedLoopNode::incr() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != nullptr ? cle->incr() : nullptr;
}
inline Node* BaseCountedLoopNode::phi() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != nullptr ? cle->phi() : nullptr;
}

inline jlong BaseCountedLoopNode::stride_con() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != nullptr ? cle->stride_con() : 0;
}


//------------------------------LoopLimitNode-----------------------------
// Counted Loop limit node which represents exact final iterator value:
// trip_count  = (limit - init_trip + stride - 1) / stride
// final_value = trip_count * stride + init_trip
// Use HW instructions to calculate it when it can overflow in integer.
// Note, final_value should fit into integer since counted loop has
// limit check: limit <= max_int-stride.
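//
// For example (a worked sketch of the formulas above): init_trip = 0,
// limit = 10, stride = 3 gives
//   trip_count  = (10 - 0 + 3 - 1) / 3 = 4
//   final_value = 4 * 3 + 0 = 12
// final_value lies past limit, which is why the limit check above is needed
// to keep final_value within the integer range.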
class LoopLimitNode : public Node {
  enum { Init=1, Limit=2, Stride=3 };
public:
  LoopLimitNode(Compile* C, Node *init, Node *limit, Node *stride) : Node(nullptr, init, limit, stride) {
    // Put it on the Macro nodes list to optimize during macro nodes expansion.
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeInt::INT; }
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase);
};

// Support for strip mining
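//
// In a strip mined loop nest, the inner CountedLoop runs without safepoints
// for a bounded number of iterations per strip, while the enclosing
// OuterStripMinedLoop keeps a SafePoint. Rough shape (a sketch; node names
// follow the accessors below):
//
//    OuterStripMinedLoop <-----------------+
//        |                                 |
//    CountedLoop <------+                  |
//        |              |                  |
//      (body)        IfTrue                |
//        |              |                  |
//    CountedLoopEnd ----+                  |
//        |                                 |
//    IfFalse (inner_loop_exit)             |
//        |                                 |
//    SafePoint (outer_safepoint)           |
//        |                                 |
//    OuterStripMinedLoopEnd                |
//        |             \                   |
//    IfFalse          IfTrue --------------+
//    (outer_loop_exit) (outer_loop_tail)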
class OuterStripMinedLoopNode : public LoopNode {
private:
  void fix_sunk_stores_when_back_to_counted_loop(PhaseIterGVN* igvn, PhaseIdealLoop* iloop) const;
  void handle_sunk_stores_when_finishing_construction(PhaseIterGVN* igvn);

public:
  OuterStripMinedLoopNode(Compile* C, Node *entry, Node *backedge)
    : LoopNode(entry, backedge) {
    init_class_id(Class_OuterStripMinedLoop);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }

  virtual int Opcode() const;

  virtual IfTrueNode* outer_loop_tail() const;
  virtual OuterStripMinedLoopEndNode* outer_loop_end() const;
  virtual IfFalseNode* outer_loop_exit() const;
  virtual SafePointNode* outer_safepoint() const;
  CountedLoopNode* inner_counted_loop() const { return unique_ctrl_out()->as_CountedLoop(); }
  CountedLoopEndNode* inner_counted_loop_end() const { return inner_counted_loop()->loopexit(); }
  IfFalseNode* inner_loop_exit() const { return inner_counted_loop_end()->false_proj(); }

  void adjust_strip_mined_loop(PhaseIterGVN* igvn);

  void remove_outer_loop_and_safepoint(PhaseIterGVN* igvn) const;

  void transform_to_counted_loop(PhaseIterGVN* igvn, PhaseIdealLoop* iloop);

  static Node* register_new_node(Node* node, LoopNode* ctrl, PhaseIterGVN* igvn, PhaseIdealLoop* iloop);

  Node* register_control(Node* node, Node* loop, Node* idom, PhaseIterGVN* igvn,
                         PhaseIdealLoop* iloop);
};

class OuterStripMinedLoopEndNode : public IfNode {
public:
  OuterStripMinedLoopEndNode(Node *control, Node *test, float prob, float cnt)
    : IfNode(control, test, prob, cnt) {
    init_class_id(Class_OuterStripMinedLoopEnd);
  }

  virtual int Opcode() const;

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  bool is_expanded(PhaseGVN *phase) const;
};

// -----------------------------IdealLoopTree----------------------------------
class IdealLoopTree : public ResourceObj {
public:
  IdealLoopTree *_parent;  // Parent in loop tree
  IdealLoopTree *_next;    // Next sibling in loop tree
  IdealLoopTree *_child;   // First child in loop tree

  // The head-tail backedge defines the loop.
  // If a loop has multiple backedges, this is addressed during cleanup where
  // we peel off the multiple backedges, merging all edges at the bottom and
  // ensuring that one proper backedge flows into the loop.
  Node *_head;             // Head of loop
  Node *_tail;             // Tail of loop
  inline Node *tail();     // Handle lazy update of _tail field
  inline Node *head();     // Handle lazy update of _head field
  PhaseIdealLoop* _phase;
  int _local_loop_unroll_limit;
  int _local_loop_unroll_factor;

  Node_List _body;         // Loop body for inner loops

  uint16_t _nest;          // Nesting depth
  uint8_t _irreducible:1,  // True if irreducible
          _has_call:1,     // True if has call safepoint
          _has_sfpt:1,     // True if has non-call safepoint
          _rce_candidate:1, // True if candidate for range check elimination
          _has_range_checks:1,
          _has_range_checks_computed:1;

  Node_List* _safepts;     // List of safepoints in this loop
  Node_List* _required_safept;     // An inner loop cannot delete these safepts;
  Node_List* _reachability_fences; // List of reachability fences in this loop
  bool _allow_optimizations;       // Allow loop optimizations

  IdealLoopTree(PhaseIdealLoop* phase, Node* head, Node* tail);

  // Is 'l' a member of 'this'?
  bool is_member(const IdealLoopTree *l) const; // Test for nested membership

  // Set loop nesting depth. Accumulate has_call bits.
  int set_nest( uint depth );

  // Split out multiple fall-in edges from the loop header. Move them to a
  // private RegionNode before the loop. This becomes the loop landing pad.
  void split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt );

  // Split out the outermost loop from this shared header.
  void split_outer_loop( PhaseIdealLoop *phase );

  // Merge all the backedges from the shared header into a private Region.
  // Feed that region as the one backedge to this loop.
  void merge_many_backedges( PhaseIdealLoop *phase );

  // Split shared headers and insert loop landing pads.
  // Insert a LoopNode to replace the RegionNode.
  // Returns TRUE if loop tree is structurally changed.
  bool beautify_loops( PhaseIdealLoop *phase );

  // Perform optimization to use the loop predicates for null checks and range checks.
  // Applies to any loop level (not just the innermost one)
  bool loop_predication( PhaseIdealLoop *phase);
  bool can_apply_loop_predication();

  // Perform iteration-splitting on inner loops. Split iterations to
  // avoid range checks or one-shot null checks. Returns false if the
  // current round of loop opts should stop.
  bool iteration_split( PhaseIdealLoop *phase, Node_List &old_new );

  // Driver for various flavors of iteration splitting. Returns false
  // if the current round of loop opts should stop.
  bool iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new );

  // Given dominators, try to find loops with calls that must always be
  // executed (call dominates loop tail). These loops do not need non-call
  // safepoints (ncsfpt).
  void check_safepts(VectorSet &visited, Node_List &stack);

  // Allpaths backwards scan from loop tail, terminating each path at first safepoint
  // encountered.
  void allpaths_check_safepts(VectorSet &visited, Node_List &stack);

  // Remove safepoints from loop. Optionally keeping one.
  void remove_safepoints(PhaseIdealLoop* phase, bool keep_one);

  // Convert to counted loops where possible
  void counted_loop( PhaseIdealLoop *phase );

  // Check for Node being a loop-breaking test
  Node *is_loop_exit(Node *iff) const;

  // Return unique loop-exit projection or null if the loop has multiple exits.
  IfFalseNode* unique_loop_exit_proj_or_null();

  // Remove simplistic dead code from loop body
  void DCE_loop_body();

  // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
  // Replace with a 1-in-10 exit guess.
  void adjust_loop_exit_prob( PhaseIdealLoop *phase );

  // Return TRUE or FALSE if the loop should never be RCE'd or aligned.
  // Useful for unrolling loops with NO array accesses.
  bool policy_peel_only( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be unswitched -- clone
  // loop with an invariant test
  bool policy_unswitching( PhaseIdealLoop *phase ) const;

  // Micro-benchmark spamming. Remove empty loops.
  bool do_remove_empty_loop( PhaseIdealLoop *phase );

  // Convert one-iteration loop into normal code.
  bool do_one_iteration_loop( PhaseIdealLoop *phase );

  // Return TRUE or FALSE if the loop should be peeled or not. Peel if we can
  // move some loop-invariant test (usually a null-check) before the loop.
  bool policy_peeling(PhaseIdealLoop *phase);

  uint estimate_peeling(PhaseIdealLoop *phase);

  // Return TRUE or FALSE if the loop should be maximally unrolled. Stash any
  // known trip count in the counted loop node.
  bool policy_maximally_unroll(PhaseIdealLoop *phase) const;

  // Return TRUE or FALSE if the loop should be unrolled or not. Apply unroll
  // if the loop is a counted loop and the loop body is small enough.
  bool policy_unroll(PhaseIdealLoop *phase);

  // Loop analyses to map to a maximal superword unrolling for vectorization.
  void policy_unroll_slp_analysis(CountedLoopNode *cl, PhaseIdealLoop *phase, int future_unroll_ct);

  // Return TRUE or FALSE if the loop should be range-check-eliminated.
  // Gather a list of IF tests that are dominated by iteration splitting;
  // also gather the end of the first split and the start of the 2nd split.
  bool policy_range_check(PhaseIdealLoop* phase, bool provisional, BasicType bt) const;

  // Return TRUE if "iff" is a range check.
  bool is_range_check_if(IfProjNode* if_success_proj, PhaseIdealLoop* phase, Invariance& invar DEBUG_ONLY(COMMA ProjNode* predicate_proj)) const;
  bool is_range_check_if(IfProjNode* if_success_proj, PhaseIdealLoop* phase, BasicType bt, Node* iv, Node*& range, Node*& offset,
                         jlong& scale) const;

  // Estimate the number of nodes required when cloning a loop (body).
  uint est_loop_clone_sz(uint factor) const;
  // Estimate the number of nodes required when unrolling a loop (body).
  uint est_loop_unroll_sz(uint factor) const;

  // Compute loop trip count if possible
  void compute_trip_count(PhaseIdealLoop* phase, BasicType bt);

  // Compute loop trip count from profile data
  float compute_profile_trip_cnt_helper(Node* n);
  void compute_profile_trip_cnt( PhaseIdealLoop *phase );

  // Reassociate invariant expressions.
  void reassociate_invariants(PhaseIdealLoop *phase);
  // Reassociate invariant binary expressions.
  Node* reassociate(Node* n1, PhaseIdealLoop *phase);
  // Reassociate invariant add, subtract, and compare expressions.
  Node* reassociate_add_sub_cmp(Node* n1, int inv1_idx, int inv2_idx, PhaseIdealLoop* phase);
  // Return nonzero index of invariant operand if invariant and variant
  // are combined with an associative binary. Helper for reassociate_invariants.
  int find_invariant(Node* n, PhaseIdealLoop *phase);
  // Return TRUE if "n" is associative.
  bool is_associative(Node* n, Node* base=nullptr);
  // Return TRUE if "n" is an associative cmp node.
  bool is_associative_cmp(Node* n);

  // Return true if n is invariant
  bool is_invariant(Node* n) const;

  // Put loop body on igvn work list
  void record_for_igvn();

  bool is_root() { return _parent == nullptr; }
  // A proper/reducible loop w/o any (occasional) dead back-edge.
  bool is_loop() { return !_irreducible && !tail()->is_top(); }
  bool is_counted()   { return is_loop() && _head->is_CountedLoop(); }
  bool is_innermost() { return is_loop() && _child == nullptr; }

  void remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase);

  bool compute_has_range_checks() const;
  bool range_checks_present() {
    if (!_has_range_checks_computed) {
      if (compute_has_range_checks()) {
        _has_range_checks = 1;
      }
      _has_range_checks_computed = 1;
    }
    return _has_range_checks;
  }

  // Return the parent's IdealLoopTree for a strip mined loop which is the outer strip mined loop.
  // In all other cases, return this.
  IdealLoopTree* skip_strip_mined() {
    return _head->as_Loop()->is_strip_mined() ? _parent : this;
  }

  // Registers a reachability fence node in the loop.
  void register_reachability_fence(ReachabilityFenceNode* rf);

#ifndef PRODUCT
  void dump_head();        // Dump loop head only
  void dump();             // Dump this loop recursively
#endif

#ifdef ASSERT
  GrowableArray<IdealLoopTree*> collect_sorted_children() const;
  bool verify_tree(IdealLoopTree* loop_verify) const;
#endif

private:
  enum { EMPTY_LOOP_SIZE = 7 }; // Number of nodes in an empty loop.

  // Estimate the number of nodes resulting from control and data flow merge.
  uint est_loop_flow_merge_sz() const;

  // Check whether unrolling by unroll_cnt would leave a large number of residual iterations.
  // Return true if the residual iterations are more than 10% of the trip count.
  bool is_residual_iters_large(int unroll_cnt, CountedLoopNode *cl) const {
    return (unroll_cnt - 1) * (100.0 / LoopPercentProfileLimit) > cl->profile_trip_cnt();
  }
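
  // For example (arithmetic on the formula above, reading
  // LoopPercentProfileLimit as a percentage): with a limit of 10% and
  // unroll_cnt = 4 the test becomes (4 - 1) * (100.0 / 10) = 30 >
  // profile_trip_cnt, i.e. for profiled trip counts below 30 the worst-case
  // unroll_cnt - 1 = 3 residual iterations exceed 10% of the trip count.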

  void collect_loop_core_nodes(PhaseIdealLoop* phase, Unique_Node_List& wq) const;

  bool empty_loop_with_data_nodes(PhaseIdealLoop* phase) const;

  void enqueue_data_nodes(PhaseIdealLoop* phase, Unique_Node_List& empty_loop_nodes, Unique_Node_List& wq) const;

  bool process_safepoint(PhaseIdealLoop* phase, Unique_Node_List& empty_loop_nodes, Unique_Node_List& wq,
                         Node* sfpt) const;

  bool empty_loop_candidate(PhaseIdealLoop* phase) const;

  bool empty_loop_with_extra_nodes_candidate(PhaseIdealLoop* phase) const;
};

// -----------------------------PhaseIdealLoop---------------------------------
// Computes the mapping from Nodes to IdealLoopTrees. Organizes IdealLoopTrees
// into a loop tree. Drives the loop-based transformations on the ideal graph.
class PhaseIdealLoop : public PhaseTransform {
  friend class IdealLoopTree;
  friend class SuperWord;
  friend class ShenandoahBarrierC2Support;
  friend class AutoNodeBudget;

  Arena _arena; // For data whose lifetime is a single pass of loop optimizations

  // Map loop membership for CFG nodes, and ctrl for non-CFG nodes.
  //
  // Exception: dead CFG nodes may instead have a ctrl/idom forwarding
  // installed. See: forward_ctrl
  Node_List _loop_or_ctrl;

  // Pre-computed def-use info
  PhaseIterGVN &_igvn;

  // Head of loop tree
  IdealLoopTree* _ltree_root;

  // Array of pre-order numbers, plus post-visited bit.
  // ZERO for not pre-visited. EVEN for pre-visited but not post-visited.
  // ODD for post-visited. Other bits are the pre-order number.
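  //
  // For example (sketch of the encoding): pre-visiting a node with pre-order
  // number 5 stores 5 << 1 = 10; marking it post-visited sets the low bit,
  // giving 11, while get_preorder() still recovers 11 >> 1 = 5.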
  uint *_preorders;
  uint _max_preorder;

  ReallocMark _nesting; // Safety checks for arena reallocation

  const PhaseIdealLoop* _verify_me;
  bool _verify_only;

  // Allocate _preorders[] array
  void allocate_preorders() {
    _max_preorder = C->unique()+8;
    _preorders = NEW_RESOURCE_ARRAY(uint, _max_preorder);
    memset(_preorders, 0, sizeof(uint) * _max_preorder);
  }

  // Allocate _preorders[] array
  void reallocate_preorders() {
    _nesting.check(); // Check if a potential re-allocation in the resource arena is safe
    if ( _max_preorder < C->unique() ) {
      _preorders = REALLOC_RESOURCE_ARRAY(_preorders, _max_preorder, C->unique());
      _max_preorder = C->unique();
    }
    memset(_preorders, 0, sizeof(uint) * _max_preorder);
  }

  // Check to grow _preorders[] array for the case when build_loop_tree_impl()
  // adds new nodes.
  void check_grow_preorders( ) {
    _nesting.check(); // Check if a potential re-allocation in the resource arena is safe
    if ( _max_preorder < C->unique() ) {
      uint newsize = _max_preorder<<1; // double size of array
      _preorders = REALLOC_RESOURCE_ARRAY(_preorders, _max_preorder, newsize);
      memset(&_preorders[_max_preorder],0,sizeof(uint)*(newsize-_max_preorder));
      _max_preorder = newsize;
    }
  }
  // Check for pre-visited. Zero for NOT visited; non-zero for visited.
  int is_visited( Node *n ) const { return _preorders[n->_idx]; }
  // Pre-order numbers are written to the Nodes array as low-bit-set values.
  void set_preorder_visited( Node *n, int pre_order ) {
    assert( !is_visited( n ), "already set" );
    _preorders[n->_idx] = (pre_order<<1);
  }
  // Return pre-order number.
  int get_preorder( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]>>1; }

  // Check for being post-visited.
  // Should be previsited already (checked with assert(is_visited(n))).
  int is_postvisited( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]&1; }

  // Mark as post visited
  void set_postvisited( Node *n ) { assert( !is_postvisited( n ), "" ); _preorders[n->_idx] |= 1; }

public:
  // Set/get control node out. Set lower bit to distinguish from IdealLoopTree
  // Returns true if "n" is a data node, false if it's a CFG node.
  //
  // Exception:
  // control nodes that are dead because of "replace_node_and_forward_ctrl"
  // or have otherwise modified their ctrl state by "forward_ctrl".
  // They return "true", because they have a ctrl "forwarding" to the other ctrl node they
  // were replaced with.
  bool has_ctrl(const Node* n) const { return ((intptr_t)_loop_or_ctrl[n->_idx]) & 1; }

private:
  // clear out dead code after build_loop_late
  Node_List _deadlist;
  Node_List _zero_trip_guard_opaque_nodes;
  Node_List _multiversion_opaque_nodes;

  // Support for faster execution of get_late_ctrl()/dom_lca()
  // when a node has many uses and dominator depth is deep.
  GrowableArray<jlong> _dom_lca_tags;
  uint _dom_lca_tags_round;
  void init_dom_lca_tags();

  // Helper for debugging bad dominance relationships
  bool verify_dominance(Node* n, Node* use, Node* LCA, Node* early);

  Node* compute_lca_of_uses(Node* n, Node* early, bool verify = false);

  // Inline wrapper for frequent cases:
  // 1) only one use
  // 2) a use is the same as the current LCA passed as 'n1'
  Node *dom_lca_for_get_late_ctrl( Node *lca, Node *n, Node *tag ) {
    assert( n->is_CFG(), "" );
    // Fast-path null lca
    if( lca != nullptr && lca != n ) {
      assert( lca->is_CFG(), "" );
      // find LCA of all uses
      n = dom_lca_for_get_late_ctrl_internal( lca, n, tag );
    }
    return find_non_split_ctrl(n);
  }
  Node *dom_lca_for_get_late_ctrl_internal( Node *lca, Node *n, Node *tag );

  // Helper function for directing control inputs away from CFG split points.
  Node *find_non_split_ctrl( Node *ctrl ) const {
    if (ctrl != nullptr) {
      if (ctrl->is_MultiBranch()) {
        ctrl = ctrl->in(0);
      }
      assert(ctrl->is_CFG(), "CFG");
    }
    return ctrl;
  }

  void cast_incr_before_loop(Node* incr, Node* ctrl, CountedLoopNode* loop);

#ifdef ASSERT
  static void ensure_zero_trip_guard_proj(Node* node, bool is_main_loop);
#endif
private:
  static void get_opaque_template_assertion_predicate_nodes(ParsePredicateSuccessProj* parse_predicate_proj,
                                                            Unique_Node_List& list);
  void update_main_loop_assertion_predicates(CountedLoopNode* new_main_loop_head, int stride_con_before_unroll);
  void initialize_assertion_predicates_for_peeled_loop(CountedLoopNode* peeled_loop_head,
                                                       CountedLoopNode* remaining_loop_head,
                                                       uint first_node_index_in_cloned_loop_body,
                                                       const Node_List& old_new);
  void initialize_assertion_predicates_for_main_loop(CountedLoopNode* pre_loop_head,
                                                     CountedLoopNode* main_loop_head,
                                                     uint first_node_index_in_pre_loop_body,
                                                     uint last_node_index_in_pre_loop_body,
                                                     DEBUG_ONLY(uint last_node_index_from_backedge_goo COMMA)
                                                     const Node_List& old_new);
  void initialize_assertion_predicates_for_post_loop(CountedLoopNode* main_loop_head, CountedLoopNode* post_loop_head,
                                                     uint first_node_index_in_cloned_loop_body);
  void create_assertion_predicates_at_loop(CountedLoopNode* source_loop_head, CountedLoopNode* target_loop_head,
                                           const NodeInLoopBody& _node_in_loop_body, bool kill_old_template);
  void create_assertion_predicates_at_main_or_post_loop(CountedLoopNode* source_loop_head,
                                                        CountedLoopNode* target_loop_head,
                                                        const NodeInLoopBody& _node_in_loop_body,
                                                        bool kill_old_template);
  void rewire_old_target_loop_entry_dependency_to_new_entry(CountedLoopNode* target_loop_head,
                                                            const Node* old_target_loop_entry,
                                                            uint node_index_before_new_assertion_predicate_nodes);
  void log_loop_tree();

public:

  PhaseIterGVN &igvn() const { return _igvn; }

  Arena* arena() { return &_arena; }

  bool has_node(const Node* n) const {
    guarantee(n != nullptr, "No Node.");
    return _loop_or_ctrl[n->_idx] != nullptr;
  }
  // check if transform created new nodes that need _ctrl recorded
  Node *get_late_ctrl( Node *n, Node *early );
  Node *get_early_ctrl( Node *n );
  Node *get_early_ctrl_for_expensive(Node *n, Node* earliest);
  void set_early_ctrl(Node* n, bool update_body);
  void set_subtree_ctrl(Node* n, bool update_body);
  void set_ctrl( Node *n, Node *ctrl ) {
    assert( !has_node(n) || has_ctrl(n), "" );
    assert( ctrl->in(0), "cannot set dead control node" );
    assert( ctrl == find_non_split_ctrl(ctrl), "must set legal ctrl" );
    _loop_or_ctrl.map(n->_idx, (Node*)((intptr_t)ctrl + 1));
  }
  void set_root_as_ctrl(Node* n) {
    assert(!has_node(n) || has_ctrl(n), "");
    _loop_or_ctrl.map(n->_idx, (Node*)((intptr_t)C->root() + 1));
  }
  // Set control and update loop membership
  void set_ctrl_and_loop(Node* n, Node* ctrl) {
    IdealLoopTree* old_loop = get_loop(get_ctrl(n));
    IdealLoopTree* new_loop = get_loop(ctrl);
    if (old_loop != new_loop) {
      if (old_loop->_child == nullptr) old_loop->_body.yank(n);
      if (new_loop->_child == nullptr) new_loop->_body.push(n);
    }
    set_ctrl(n, ctrl);
  }

  // Retrieves the ctrl for a data node i.
  Node* get_ctrl(const Node* i) {
    assert(has_node(i) && has_ctrl(i), "must be data node with ctrl");
    Node* n = get_ctrl_no_update(i);
    // We store the found ctrl in the side-table again. In most cases,
    // this is a no-op, since we just read from _loop_or_ctrl. But in cases
    // where there was a ctrl forwarding via dead ctrl nodes, this shortens the path.
    // See: forward_ctrl
    _loop_or_ctrl.map(i->_idx, (Node*)((intptr_t)n + 1));
    assert(has_node(i) && has_ctrl(i), "must still be data node with ctrl");
    assert(n == find_non_split_ctrl(n), "must return legal ctrl");
    return n;
  }

  bool is_dominator(Node* dominator, Node* n);
  bool is_strict_dominator(Node* dominator, Node* n);

  // return get_ctrl for a data node and self(n) for a CFG node
  Node* ctrl_or_self(Node* n) {
    if (has_ctrl(n))
      return get_ctrl(n);
    else {
      assert (n->is_CFG(), "must be a CFG node");
      return n;
    }
  }

private:
  Node* get_ctrl_no_update_helper(const Node* i) const {
    // We expect only data nodes (which must have a ctrl set), or
    // dead ctrl nodes that have a ctrl "forwarding".
    // See: forward_ctrl.
    assert(has_ctrl(i), "only data nodes or ctrl nodes with ctrl forwarding expected");
    return (Node*)(((intptr_t)_loop_or_ctrl[i->_idx]) & ~1);
  }

  // Compute the ctrl of node i, jumping over ctrl forwardings.
  Node* get_ctrl_no_update(const Node* i) const {
    assert(has_ctrl(i), "only data nodes expected");
    Node* n = get_ctrl_no_update_helper(i);
    if (n->in(0) == nullptr) {
      // We encountered a dead CFG node.
      // If everything went right, this dead CFG node should have had a ctrl
      // forwarding installed, using "forward_ctrl". We now have to jump from
      // the old (dead) ctrl node to the new (live) ctrl node, in possibly
      // multiple ctrl forwarding steps.
      do {
        n = get_ctrl_no_update_helper(n);
      } while (n->in(0) == nullptr);
      n = find_non_split_ctrl(n);
    }
    return n;
  }

public:
  // Check for loop being set
  // "n" must be a control node. Returns true if "n" is known to be in a loop.
  bool has_loop( Node *n ) const {
    assert(!has_node(n) || !has_ctrl(n), "");
    return has_node(n);
  }
  // Set loop
  void set_loop( Node *n, IdealLoopTree *loop ) {
    _loop_or_ctrl.map(n->_idx, (Node*)loop);
  }

  // Install a ctrl "forwarding" from an old (dead) control node.
  // This is a "lazy" update of the "get_ctrl" and "idom" mechanism:
  // - Install a forwarding from old_node (dead ctrl) to new_node.
  // - When querying "get_ctrl": jump from data node over possibly
  //   multiple dead ctrl nodes with ctrl forwarding to eventually
  //   reach a live ctrl node. Shorten the path to avoid chasing the
  //   forwarding in the future.
  // - When querying "idom": from some node get its old idom, which
  //   may be dead but has an idom forwarding to the new and live
  //   idom. Shorten the path to avoid chasing the forwarding in the
  //   future.
  // Note: while the "idom" information is stored in the "_idom"
  //       side-table, the idom forwarding piggybacks on the ctrl
  //       forwarding on "_loop_or_ctrl".
  // Using "forward_ctrl" allows us to only edit the entry for the old
  // dead node now, and we do not have to update all the nodes that had
  // the old_node as their "get_ctrl" or "idom". We clean up the forwarding
  // links when we query "get_ctrl" or "idom" for these nodes the next time.
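  //
  // For example (sketch): after replace_node_and_forward_ctrl(D, N) for a dead
  // ctrl node D and a live ctrl node N, the side-table slot of D holds
  // (Node*)((intptr_t)N + 1). A later get_ctrl(d) for a data node d still
  // mapped to D chases the forwarding D -> N (possibly across several dead
  // nodes) and then re-maps d directly to N, shortening the path for future
  // queries.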
  void forward_ctrl(Node* old_node, Node* new_node) {
    assert(!has_ctrl(old_node) && old_node->is_CFG() && old_node->in(0) == nullptr,
           "must be dead ctrl (CFG) node");
    assert(!has_ctrl(new_node) && new_node->is_CFG() && new_node->in(0) != nullptr,
           "must be live ctrl (CFG) node");
    assert(old_node != new_node, "no cycles please");
    // Re-use the side array slot for this node to provide the
    // forwarding pointer.
    _loop_or_ctrl.map(old_node->_idx, (Node*)((intptr_t)new_node + 1));
    assert(has_ctrl(old_node), "must have installed ctrl forwarding");
  }

  // Replace the old ctrl node with a new ctrl node.
  // - Update the node inputs of all uses.
  // - Lazily update the ctrl and idom info of all uses, via a ctrl/idom forwarding.
  void replace_node_and_forward_ctrl(Node* old_node, Node* new_node) {
    _igvn.replace_node(old_node, new_node);
    forward_ctrl(old_node, new_node);
  }

  void remove_dead_data_node(Node* dead) {
    assert(dead->outcnt() == 0 && !dead->is_top(), "must be dead");
    assert(!dead->is_CFG(), "not a data node");
    Node* c = get_ctrl(dead);
    IdealLoopTree* lpt = get_loop(c);
    _loop_or_ctrl.map(dead->_idx, nullptr); // This node is useless
    lpt->_body.yank(dead);
    igvn().remove_dead_node(dead, PhaseIterGVN::NodeOrigin::Graph);
  }

private:

  // Place 'n' in some loop nest, where 'n' is a CFG node
  void build_loop_tree();
  int build_loop_tree_impl(Node* n, int pre_order);
  // Insert loop into the existing loop tree. 'innermost' is a leaf of the
  // loop tree, not the root.
  IdealLoopTree *sort( IdealLoopTree *loop, IdealLoopTree *innermost );

#ifdef ASSERT
  // verify that regions in irreducible loops are marked is_in_irreducible_loop
  void verify_regions_in_irreducible_loops();
  bool is_in_irreducible_loop(RegionNode* region);
#endif

  // Place Data nodes in some loop nest
  void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
  void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
  void build_loop_late_post_work(Node* n, bool pinned);
  void build_loop_late_post(Node* n);
  void verify_strip_mined_scheduling(Node *n, Node* least);

  // Array of immediate dominance info for each CFG node indexed by node idx
private:
  uint _idom_size;
  Node **_idom;                  // Array of immediate dominators
  uint *_dom_depth;              // Used for fast LCA test
  GrowableArray<uint>* _dom_stk; // For recomputation of dom depth
  LoopOptsMode _mode;

  // build the loop tree and perform any requested optimizations
  void build_and_optimize();

  // Dominators for the sea of nodes
  void Dominators();

  // Compute the Ideal Node to Loop mapping
  PhaseIdealLoop(PhaseIterGVN& igvn, LoopOptsMode mode) :
    PhaseTransform(Ideal_Loop),
    _arena(mtCompiler, Arena::Tag::tag_idealloop),
    _loop_or_ctrl(&_arena),
    _igvn(igvn),
    _verify_me(nullptr),
    _verify_only(false),
    _mode(mode),
    _nodes_required(UINT_MAX) {
    assert(mode != LoopOptsVerify, "wrong constructor to verify IdealLoop");
    build_and_optimize();
  }

#ifndef PRODUCT
  // Verify that verify_me made the same decisions as a fresh run
  // or only verify that the graph is valid if verify_me is null.
  PhaseIdealLoop(PhaseIterGVN& igvn, const PhaseIdealLoop* verify_me = nullptr) :
    PhaseTransform(Ideal_Loop),
    _arena(mtCompiler, Arena::Tag::tag_idealloop),
    _loop_or_ctrl(&_arena),
    _igvn(igvn),
    _verify_me(verify_me),
    _verify_only(verify_me == nullptr),
    _mode(LoopOptsVerify),
    _nodes_required(UINT_MAX) {
    DEBUG_ONLY(C->set_phase_verify_ideal_loop();)
    build_and_optimize();
    DEBUG_ONLY(C->reset_phase_verify_ideal_loop();)
  }
#endif

  Node* insert_convert_node_if_needed(BasicType target, Node* input);

  Node* idom_no_update(Node* d) const {
    return idom_no_update(d->_idx);
  }

  Node* idom_no_update(uint node_idx) const {
    assert(node_idx < _idom_size, "oob");
    Node* n = _idom[node_idx];
    assert(n != nullptr, "Bad immediate dominator info.");
    while (n->in(0) == nullptr) { // Skip dead CFG nodes
      // We encountered a dead CFG node.
      // If everything went right, this dead CFG node should have had an idom
      // forwarding installed, using "forward_ctrl". We now have to jump from
      // the old (dead) idom node to the new (live) idom node, in possibly
      // multiple idom forwarding steps.
      // Note that we piggyback on "_loop_or_ctrl" to do the forwarding,
      // since we forward both "get_ctrl" and "idom" from the dead to the
      // new live ctrl/idom nodes.
      n = (Node*)(((intptr_t)_loop_or_ctrl[n->_idx]) & ~1);
      assert(n != nullptr, "Bad immediate dominator info.");
    }
    return n;
  }

public:
  Node* idom(Node* n) const {
    return idom(n->_idx);
  }

  Node* idom(uint node_idx) const {
    Node* n = idom_no_update(node_idx);
    // We store the found idom in the side-table again. In most cases,
    // this is a no-op, since we just read from _idom. But in cases where
    // there was an idom forwarding via dead idom nodes, this shortens the path.
    // See: forward_ctrl
    _idom[node_idx] = n;
    return n;
  }

  uint dom_depth(Node* d) const {
    guarantee(d != nullptr, "Null dominator info.");
    guarantee(d->_idx < _idom_size, "");
    return _dom_depth[d->_idx];
  }
  void set_idom(Node* d, Node* n, uint dom_depth);
  // Locally compute IDOM using dom_lca call
  Node *compute_idom( Node *region ) const;
  // Recompute dom_depth
  void recompute_dom_depth();

  // Is safept not required by an outer loop?
  bool is_deleteable_safept(Node* sfpt) const;

  // Replace parallel induction variable (parallel to trip counter)
  void replace_parallel_iv(IdealLoopTree *loop);

  Node *dom_lca( Node *n1, Node *n2 ) const {
    return find_non_split_ctrl(dom_lca_internal(n1, n2));
  }
  Node *dom_lca_internal( Node *n1, Node *n2 ) const;

  Node* dominated_node(Node* c1, Node* c2) {
    assert(is_dominator(c1, c2) || is_dominator(c2, c1), "nodes must be related");
    return is_dominator(c1, c2) ? c2 : c1;
  }

  // Return control node that's dominated by the 2 others
  Node* dominated_node(Node* c1, Node* c2, Node* c3) {
    return dominated_node(c1, dominated_node(c2, c3));
  }

  // Build and verify the loop tree without modifying the graph. This
  // is useful to verify that all inputs properly dominate their uses.
  static void verify(PhaseIterGVN& igvn) {
#ifdef ASSERT
    ResourceMark rm;
    Compile::TracePhase tp(_t_idealLoopVerify);
    PhaseIdealLoop v(igvn);
#endif
  }

  // Recommended way to use PhaseIdealLoop.
  // Runs PhaseIdealLoop in some mode and allocates a local scope for memory allocations.
1340 static void optimize(PhaseIterGVN &igvn, LoopOptsMode mode) {
1341 ResourceMark rm;
1342 PhaseIdealLoop v(igvn, mode);
1343
1344 Compile* C = Compile::current();
1345 if (!C->failing()) {
1346 // Cleanup any modified bits
1347 igvn.optimize();
1348 if (C->failing()) { return; }
1349 v.log_loop_tree();
1350 }
1351 }
1352
1353 // True if the method has at least 1 irreducible loop
1354 bool _has_irreducible_loops;
1355
1356 // Per-Node transform
1357 virtual Node* transform(Node* n) { return nullptr; }
1358
1359 Node* loop_exit_control(const IdealLoopTree* loop) const;
1360
1361 class LoopExitTest {
1362 bool _is_valid;
1363
1364 const Node* _back_control;
1365 const IdealLoopTree* _loop;
1366 PhaseIdealLoop* _phase;
1367
1368 Node* _cmp;
1369 Node* _incr;
1370 Node* _limit;
1371 BoolTest::mask _mask;
1372 float _cl_prob;
1373
1374 public:
1375 LoopExitTest(const Node* back_control, const IdealLoopTree* loop, PhaseIdealLoop* phase) :
1376 _is_valid(false),
1377 _back_control(back_control),
1378 _loop(loop),
1379 _phase(phase),
1380 _cmp(nullptr),
1381 _incr(nullptr),
1382 _limit(nullptr),
1383 _mask(BoolTest::illegal),
1384 _cl_prob(0.0f) {}
1385
1386 void build();
1387 void canonicalize_mask(jlong stride_con);
1388
1389 bool is_valid_with_bt(BasicType bt) const {
1390 return _is_valid && _cmp != nullptr && _cmp->Opcode() == Op_Cmp(bt);
1391 }
1392
1393 bool should_include_limit() const { return _mask == BoolTest::le || _mask == BoolTest::ge; }
1394
1395 CmpNode* cmp() const { return _cmp->as_Cmp(); }
1396 Node* incr() const { return _incr; }
1397 Node* limit() const { return _limit; }
1398 BoolTest::mask mask() const { return _mask; }
1399 float cl_prob() const { return _cl_prob; }
1400 };
1401
1402 class LoopIVIncr {
1403 bool _is_valid;
1404
1405 const Node* _head;
1406 const IdealLoopTree* _loop;
1407
1408 Node* _incr;
1409 Node* _phi_incr;
1410
1411 public:
1412 LoopIVIncr(const Node* head, const IdealLoopTree* loop) :
1413 _is_valid(false),
1414 _head(head),
1415 _loop(loop),
1416 _incr(nullptr),
1417 _phi_incr(nullptr) {}
1418
1419 void build(Node* old_incr);
1420
1421 bool is_valid() const { return _is_valid; }
1422 bool is_valid_with_bt(const BasicType bt) const {
1423 return _is_valid && _incr->Opcode() == Op_Add(bt);
1424 }
1425
1426 Node* incr() const { return _incr; }
1427 Node* phi_incr() const { return _phi_incr; }
1428 };
1429
1430 class LoopIVStride {
1431 bool _is_valid;
1432
1433 BasicType _iv_bt;
1434 Node* _stride_node;
1435 Node* _xphi;
1436
1437 public:
1438 LoopIVStride(BasicType iv_bt) :
1439 _is_valid(false),
1440 _iv_bt(iv_bt),
1441 _stride_node(nullptr),
1442 _xphi(nullptr) {}
1443
1444 void build(const Node* incr);
1445
1446 bool is_valid() const { return _is_valid && _stride_node != nullptr; }
1447 Node* stride_node() const { return _stride_node; }
1448 Node* xphi() const { return _xphi; }
1449
1450 jlong compute_non_zero_stride_con(BoolTest::mask mask, BasicType iv_bt) const;
1451 };
1452
1453 static PhiNode* loop_iv_phi(const Node* xphi, const Node* phi_incr, const Node* head);
1454
1455 bool try_convert_to_counted_loop(Node* head, IdealLoopTree*& loop, BasicType iv_bt);
1456
1457 Node* loop_nest_replace_iv(Node* iv_to_replace, Node* inner_iv, Node* outer_phi, Node* inner_head, BasicType bt);
1458 bool create_loop_nest(IdealLoopTree* loop, Node_List &old_new);
1459
1460 void add_parse_predicate(Deoptimization::DeoptReason reason, Node* inner_head, IdealLoopTree* loop, SafePointNode* sfpt);
1461 SafePointNode* find_safepoint(Node* back_control, const Node* head, const IdealLoopTree* loop);
1462
1463 void add_parse_predicates(IdealLoopTree* outer_ilt, LoopNode* inner_head, SafePointNode* cloned_sfpt);
1464
1465 IdealLoopTree* insert_outer_loop(IdealLoopTree* loop, LoopNode* outer_l, Node* outer_ift);
1466 IdealLoopTree* create_outer_strip_mined_loop(Node* init_control,
1467 IdealLoopTree* loop, float cl_prob, float le_fcnt,
1468 Node*& entry_control, Node*& iffalse);
1469
1470 Node* exact_limit( IdealLoopTree *loop );
1471
  // Return the innermost enclosing IdealLoopTree for node 'n'
1473 IdealLoopTree* get_loop(const Node* n) const {
1474 // Dead nodes have no loop, so return the top level loop instead
1475 if (!has_node(n)) return _ltree_root;
1476 assert(!has_ctrl(n), "");
1477 return (IdealLoopTree*)_loop_or_ctrl[n->_idx];
1478 }
1479
1480 IdealLoopTree* ltree_root() const { return _ltree_root; }
1481
1482 // Is 'n' a (nested) member of 'loop'?
1483 bool is_member(const IdealLoopTree* loop, const Node* n) const {
1484 return loop->is_member(get_loop(n));
1485 }
1486
  // Is the control for 'n' a (nested) member of 'loop'?
1488 bool ctrl_is_member(const IdealLoopTree* loop, const Node* n) {
1489 return is_member(loop, get_ctrl(n));
1490 }
1491
1492 // This is the basic building block of the loop optimizations. It clones an
1493 // entire loop body. It makes an old_new loop body mapping; with this
1494 // mapping you can find the new-loop equivalent to an old-loop node. All
1495 // new-loop nodes are exactly equal to their old-loop counterparts, all
1496 // edges are the same. All exits from the old-loop now have a RegionNode
1497 // that merges the equivalent new-loop path. This is true even for the
1498 // normal "loop-exit" condition. All uses of loop-invariant old-loop values
1499 // now come from (one or more) Phis that merge their new-loop equivalents.
1500 // Parameter side_by_side_idom:
  //   When side_by_side_idom is null, the dominator tree is constructed for
1502 // the clone loop to dominate the original. Used in construction of
1503 // pre-main-post loop sequence.
1504 // When nonnull, the clone and original are side-by-side, both are
1505 // dominated by the passed in side_by_side_idom node. Used in
1506 // construction of unswitched loops.
1507 enum CloneLoopMode {
1508 IgnoreStripMined = 0, // Only clone inner strip mined loop
1509 CloneIncludesStripMined = 1, // clone both inner and outer strip mined loops
1510 ControlAroundStripMined = 2 // Only clone inner strip mined loop,
1511 // result control flow branches
1512 // either to inner clone or outer
1513 // strip mined loop.
1514 };
1515 void clone_loop( IdealLoopTree *loop, Node_List &old_new, int dom_depth,
1516 CloneLoopMode mode, Node* side_by_side_idom = nullptr);
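  // A hypothetical usage sketch (loop head and node names assumed for
  // illustration): clone a loop side by side under a common idom, then look
  // up the clone of an old-loop node through the old_new mapping.
  //
  //   Node_List old_new;
  //   clone_loop(loop, old_new, dom_depth(head), ControlAroundStripMined, new_idom);
  //   Node* cloned = old_new[orig->_idx];  // new-loop copy of 'orig'
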
1517 void clone_loop_handle_data_uses(Node* old, Node_List &old_new,
1518 IdealLoopTree* loop, IdealLoopTree* companion_loop,
1519 Node_List*& split_if_set, Node_List*& split_bool_set,
1520 Node_List*& split_cex_set, Node_List& worklist,
1521 uint new_counter, CloneLoopMode mode);
1522 void clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop,
1523 IdealLoopTree* outer_loop, int dd, Node_List &old_new,
1524 Node_List& extra_data_nodes);
1525
1526 // If we got the effect of peeling, either by actually peeling or by
1527 // making a pre-loop which must execute at least once, we can remove
1528 // all loop-invariant dominated tests in the main body.
1529 void peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new );
1530
1531 // Generate code to do a loop peel for the given loop (and body).
1532 // old_new is a temp array.
1533 void do_peeling( IdealLoopTree *loop, Node_List &old_new );
1534
1535 // Add pre and post loops around the given loop. These loops are used
1536 // during RCE, unrolling and aligning loops.
1537 void insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only );
1538
1539 // Find the last store in the body of an OuterStripMinedLoop when following memory uses
1540 Node *find_last_store_in_outer_loop(Node* store, const IdealLoopTree* outer_loop);
1541
1542 // Add post loop after the given loop.
1543 Node *insert_post_loop(IdealLoopTree* loop, Node_List& old_new,
1544 CountedLoopNode* main_head, CountedLoopEndNode* main_end,
1545 Node* incr, Node* limit, CountedLoopNode*& post_head);
1546
1547 // Add a vector post loop between a vector main loop and the current post loop
1548 void insert_vector_post_loop(IdealLoopTree *loop, Node_List &old_new);
  // If Node n lives in the back_ctrl block, we clone a private version of n
  // in the preheader_ctrl block and return that; otherwise return n.
1551 Node *clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones );
1552
1553 // Take steps to maximally unroll the loop. Peel any odd iterations, then
1554 // unroll to do double iterations. The next round of major loop transforms
1555 // will repeat till the doubled loop body does all remaining iterations in 1
1556 // pass.
1557 void do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new );
1558
1559 // Unroll the loop body one step - make each trip do 2 iterations.
1560 void do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip );
1561
1562 // Return true if exp is a constant times an induction var
1563 bool is_scaled_iv(Node* exp, Node* iv, BasicType bt, jlong* p_scale, bool* p_short_scale, int depth = 0);
1564
1565 bool is_iv(Node* exp, Node* iv, BasicType bt);
1566
1567 // Return true if exp is a scaled induction var plus (or minus) constant
1568 bool is_scaled_iv_plus_offset(Node* exp, Node* iv, BasicType bt, jlong* p_scale, Node** p_offset, bool* p_short_scale = nullptr, int depth = 0);
1569 bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset) {
1570 jlong long_scale;
1571 if (is_scaled_iv_plus_offset(exp, iv, T_INT, &long_scale, p_offset)) {
1572 int int_scale = checked_cast<int>(long_scale);
1573 if (p_scale != nullptr) {
1574 *p_scale = int_scale;
1575 }
1576 return true;
1577 }
1578 return false;
1579 }
1580 // Helper for finding more complex matches to is_scaled_iv_plus_offset.
1581 bool is_scaled_iv_plus_extra_offset(Node* exp1, Node* offset2, Node* iv,
1582 BasicType bt,
1583 jlong* p_scale, Node** p_offset,
1584 bool* p_short_scale, int depth);
1585
1586 // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted
1587 IfTrueNode* create_new_if_for_predicate(const ParsePredicateSuccessProj* parse_predicate_proj, Node* new_entry,
1588 Deoptimization::DeoptReason reason, int opcode,
1589 bool rewire_uncommon_proj_phi_inputs = false);
1590
1591 private:
1592 // Helper functions for create_new_if_for_predicate()
1593 void set_ctrl_of_nodes_with_same_ctrl(Node* start_node, ProjNode* old_uncommon_proj, Node* new_uncommon_proj);
1594 Unique_Node_List find_nodes_with_same_ctrl(Node* node, const ProjNode* ctrl);
1595 Node* clone_nodes_with_same_ctrl(Node* start_node, ProjNode* old_uncommon_proj, Node* new_uncommon_proj);
1596 void fix_cloned_data_node_controls(const ProjNode* orig, Node* new_uncommon_proj,
1597 const OrigToNewHashtable& orig_to_clone);
1598
1599 public:
1600 void register_control(Node* n, IdealLoopTree *loop, Node* pred, bool update_body = true);
1601
1602 // Replace the control input of 'node' with 'new_control' and set the dom depth to the one of 'new_control'.
1603 void replace_control(Node* node, Node* new_control) {
1604 _igvn.replace_input_of(node, 0, new_control);
1605 set_idom(node, new_control, dom_depth(new_control));
1606 }
1607
1608 void replace_loop_entry(LoopNode* loop_head, Node* new_entry) {
1609 _igvn.replace_input_of(loop_head, LoopNode::EntryControl, new_entry);
1610 set_idom(loop_head, new_entry, dom_depth(new_entry));
1611 }
1612
1613 // Construct a range check for a predicate if
1614 BoolNode* rc_predicate(Node* ctrl, int scale, Node* offset, Node* init, Node* limit,
1615 jint stride, Node* range, bool upper, bool& overflow);
1616
1617 // Implementation of the loop predication to promote checks outside the loop
1618 bool loop_predication_impl(IdealLoopTree *loop);
1619
1620 // Reachability Fence (RF) support.
1621 private:
1622 void insert_rf(Node* ctrl, Node* referent);
1623 void replace_rf(Node* old_node, Node* new_node);
1624 void remove_rf(ReachabilityFenceNode* rf);
1625 public:
1626 bool optimize_reachability_fences();
1627 bool expand_reachability_fences();
1628
1629 private:
1630 bool loop_predication_impl_helper(IdealLoopTree* loop, IfProjNode* if_success_proj,
1631 ParsePredicateSuccessProj* parse_predicate_proj, CountedLoopNode* cl, ConNode* zero,
1632 Invariance& invar, Deoptimization::DeoptReason deopt_reason);
1633 bool can_create_loop_predicates(const PredicateBlock* profiled_loop_predicate_block) const;
1634 bool loop_predication_should_follow_branches(IdealLoopTree* loop, float& loop_trip_cnt);
1635 void loop_predication_follow_branches(Node *c, IdealLoopTree *loop, float loop_trip_cnt,
1636 PathFrequency& pf, Node_Stack& stack, VectorSet& seen,
1637 Node_List& if_proj_list);
1638 IfTrueNode* create_template_assertion_predicate(CountedLoopNode* loop_head, ParsePredicateNode* parse_predicate,
1639 IfProjNode* new_control, int scale, Node* offset, Node* range);
1640 void eliminate_hoisted_range_check(IfTrueNode* hoisted_check_proj, IfTrueNode* template_assertion_predicate_proj);
1641
  // Helper function to collect predicates for eliminating the useless ones
1643 void eliminate_useless_predicates() const;
1644
1645 void eliminate_useless_zero_trip_guard();
1646 void eliminate_useless_multiversion_if();
1647
1648 public:
1649 // Change the control input of expensive nodes to allow commoning by
1650 // IGVN when it is guaranteed to not result in a more frequent
1651 // execution of the expensive node. Return true if progress.
1652 bool process_expensive_nodes();
1653
1654 // Check whether node has become unreachable
1655 bool is_node_unreachable(Node *n) const {
1656 return !has_node(n) || n->is_unreachable(_igvn);
1657 }
1658
1659 // Eliminate range-checks and other trip-counter vs loop-invariant tests.
1660 void do_range_check(IdealLoopTree* loop);
1661
1662 // Clone loop with an invariant test (that does not exit) and
1663 // insert a clone of the test that selects which version to
1664 // execute.
1665 void do_unswitching(IdealLoopTree* loop, Node_List& old_new);
1666
1667 IfNode* find_unswitch_candidate(const IdealLoopTree* loop) const;
1668
1669 private:
1670 static bool has_control_dependencies_from_predicates(LoopNode* head);
1671 static void revert_to_normal_loop(const LoopNode* loop_head);
1672
1673 void hoist_invariant_check_casts(const IdealLoopTree* loop, const Node_List& old_new,
1674 const UnswitchedLoopSelector& unswitched_loop_selector);
1675 void add_unswitched_loop_version_bodies_to_igvn(IdealLoopTree* loop, const Node_List& old_new);
1676 static void increment_unswitch_counts(LoopNode* original_head, LoopNode* new_head);
1677 void remove_unswitch_candidate_from_loops(const Node_List& old_new, const UnswitchedLoopSelector& unswitched_loop_selector);
1678 #ifndef PRODUCT
1679 static void trace_loop_unswitching_count(IdealLoopTree* loop, LoopNode* original_head);
1680 static void trace_loop_unswitching_impossible(const LoopNode* original_head);
1681 static void trace_loop_unswitching_result(const UnswitchedLoopSelector& unswitched_loop_selector,
1682 const LoopNode* original_head, const LoopNode* new_head);
1683 static void trace_loop_multiversioning_result(const LoopSelector& loop_selector,
1684 const LoopNode* original_head, const LoopNode* new_head);
1685 #endif
1686
1687 public:
1688
1689 // Range Check Elimination uses this function!
1690 // Constrain the main loop iterations so the affine function:
1691 // low_limit <= scale_con * I + offset < upper_limit
1692 // always holds true. That is, either increase the number of iterations in
1693 // the pre-loop or the post-loop until the condition holds true in the main
1694 // loop. Scale_con, offset and limit are all loop invariant.
1695 void add_constraint(jlong stride_con, jlong scale_con, Node* offset, Node* low_limit, Node* upper_limit, Node* pre_ctrl, Node** pre_limit, Node** main_limit);
1696 // Helper function for add_constraint().
1697 Node* adjust_limit(bool reduce, Node* scale, Node* offset, Node* rc_limit, Node* old_limit, Node* pre_ctrl, bool round);
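
  // For instance (illustrative only), hoisting the range check of
  // "a[2*i + 3]" requires 0 <= 2*i + 3 < a.length for all main-loop
  // iterations; add_constraint() shifts iterations that could violate the
  // lower bound into the pre-loop and iterations that could violate the
  // upper bound into the post-loop, by adjusting pre_limit and main_limit.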
1698
1699 // Partially peel loop up through last_peel node.
1700 bool partial_peel( IdealLoopTree *loop, Node_List &old_new );
1701 bool duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old_new);
1702
1703 // AutoVectorize the loop: replace scalar ops with vector ops.
1704 enum AutoVectorizeStatus {
1705 Impossible, // This loop has the wrong shape to even try vectorization.
1706 Success, // We just successfully vectorized the loop.
1707 TriedAndFailed, // We tried to vectorize, but failed.
1708 };
1709 AutoVectorizeStatus auto_vectorize(IdealLoopTree* lpt, VSharedData &vshared);
1710
1711 void maybe_multiversion_for_auto_vectorization_runtime_checks(IdealLoopTree* lpt, Node_List& old_new);
1712 void do_multiversioning(IdealLoopTree* lpt, Node_List& old_new);
1713 IfTrueNode* create_new_if_for_multiversion(IfTrueNode* multiversioning_fast_proj);
1714 bool try_resume_optimizations_for_delayed_slow_loop(IdealLoopTree* lpt);
1715
1716 // Create a scheduled list of nodes control dependent on ctrl set.
1717 void scheduled_nodelist( IdealLoopTree *loop, VectorSet& ctrl, Node_List &sched );
1718 // Has a use in the vector set
1719 bool has_use_in_set( Node* n, VectorSet& vset );
  // Has use internal to the vector set (i.e. not in a phi at the loop head)
1721 bool has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop );
1722 // clone "n" for uses that are outside of loop
1723 int clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist );
1724 // clone "n" for special uses that are in the not_peeled region
1725 void clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
1726 VectorSet& not_peel, Node_List& sink_list, Node_List& worklist );
1727 // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
1728 void insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp );
1729 #ifdef ASSERT
1730 // Validate the loop partition sets: peel and not_peel
1731 bool is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list, VectorSet& not_peel );
1732 // Ensure that uses outside of loop are of the right form
1733 bool is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
1734 uint orig_exit_idx, uint clone_exit_idx);
1735 bool is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx);
1736 #endif
1737
  // Returns the nonzero constant stride if the if-node is a possible iv test (otherwise returns zero).
1739 int stride_of_possible_iv( Node* iff );
1740 bool is_possible_iv_test( Node* iff ) { return stride_of_possible_iv(iff) != 0; }
  // Return the (unique) control output node that's in the loop (if it exists).
1742 Node* stay_in_loop( Node* n, IdealLoopTree *loop);
1743 // Insert a signed compare loop exit cloned from an unsigned compare.
1744 IfNode* insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop);
1745 void remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop);
1746 // Utility to register node "n" with PhaseIdealLoop
1747 void register_node(Node* n, IdealLoopTree* loop, Node* pred, uint ddepth);
1748 // Utility to create an if-projection
1749 ProjNode* proj_clone(ProjNode* p, IfNode* iff);
1750 // Force the iff control output to be the live_proj
1751 Node* short_circuit_if(IfNode* iff, ProjNode* live_proj);
1752 // Insert a region before an if projection
1753 RegionNode* insert_region_before_proj(ProjNode* proj);
1754 // Insert a new if before an if projection
1755 ProjNode* insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj);
1756
1757 // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
1758 // "Nearly" because all Nodes have been cloned from the original in the loop,
1759 // but the fall-in edges to the Cmp are different. Clone bool/Cmp pairs
1760 // through the Phi recursively, and return a Bool.
1761 Node* clone_iff(PhiNode* phi);
1762 CmpNode* clone_bool(PhiNode* phi);
1763
1764
1765 // Rework addressing expressions to get the most loop-invariant stuff
1766 // moved out. We'd like to do all associative operators, but it's especially
1767 // important (common) to do address expressions.
1768 Node* remix_address_expressions(Node* n);
1769 Node* remix_address_expressions_add_left_shift(Node* n, IdealLoopTree* n_loop, Node* n_ctrl, BasicType bt);
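
  // For example (illustrative only): "(i + inv1) + inv2" can be reassociated
  // to "i + (inv1 + inv2)" so that the invariant addition is computed once
  // outside the loop.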
1770
1771 // Convert add to muladd to generate MuladdS2I under certain criteria
1772 Node * convert_add_to_muladd(Node * n);
1773
1774 // Attempt to use a conditional move instead of a phi/branch
1775 Node *conditional_move( Node *n );
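
  // For example (illustrative only): the Phi merging the two arms of
  // "x = (a < b) ? c : d" may be replaced by a CMove node, trading a branch
  // for an unconditional evaluation of both values.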
1776
1777 // Check for aggressive application of 'split-if' optimization,
1778 // using basic block level info.
1779 void split_if_with_blocks ( VectorSet &visited, Node_Stack &nstack);
1780 Node *split_if_with_blocks_pre ( Node *n );
1781 void split_if_with_blocks_post( Node *n );
1782 Node *has_local_phi_input( Node *n );
1783 // Mark an IfNode as being dominated by a prior test,
1784 // without actually altering the CFG (and hence IDOM info).
1785 void dominated_by(IfProjNode* prevdom, IfNode* iff, bool flip = false, bool prev_dom_not_imply_this = false);
1786 void rewire_safe_outputs_to_dominator(Node* source, Node* dominator, bool dominator_not_imply_source);
1787
1788 // Split Node 'n' through merge point
1789 RegionNode* split_thru_region(Node* n, RegionNode* region);
1790 // Split Node 'n' through merge point if there is enough win.
1791 Node *split_thru_phi( Node *n, Node *region, int policy );
1792 // Found an If getting its condition-code input from a Phi in the
1793 // same block. Split thru the Region.
1794 void do_split_if(Node *iff, RegionNode** new_false_region = nullptr, RegionNode** new_true_region = nullptr);
1795
1796 private:
1797 // Class to keep track of wins in split_thru_phi.
1798 class SplitThruPhiWins {
1799 private:
1800 // Region containing the phi we are splitting through.
1801 const Node* _region;
1802
    // Sum of all wins regardless of where they happen. This applies to loop phis as well as non-loop phis.
1804 int _total_wins;
1805
    // For loops, wins have a different impact depending on whether they happen on loop entry or on the backedge.
1807 // Number of wins on a loop entry edge if the split is through a loop head,
1808 // otherwise 0. Entry edge wins only pay dividends once on loop entry.
1809 int _loop_entry_wins;
1810 // Number of wins on a loop back-edge, which pay dividends on every iteration.
1811 int _loop_back_wins;
1812
1813 public:
1814 SplitThruPhiWins(const Node* region) :
1815 _region(region),
1816 _total_wins(0),
1817 _loop_entry_wins(0),
      _loop_back_wins(0) {}
1819
1820 void reset() {_total_wins = 0; _loop_entry_wins = 0; _loop_back_wins = 0;}
1821 void add_win(int ctrl_index) {
1822 if (_region->is_Loop() && ctrl_index == LoopNode::EntryControl) {
1823 _loop_entry_wins++;
1824 } else if (_region->is_Loop() && ctrl_index == LoopNode::LoopBackControl) {
1825 _loop_back_wins++;
1826 }
1827 _total_wins++;
1828 }
1829 // Is this split profitable with respect to the policy?
1830 bool profitable(int policy) const {
1831 assert(_region->is_Loop() || (_loop_entry_wins == 0 && _loop_back_wins == 0), "wins on loop edges without a loop");
1832 assert(!_region->is_Loop() || _total_wins == _loop_entry_wins + _loop_back_wins, "missed some win");
1833 // In general this means that the split has to have more wins than specified
1834 // in the policy. However, for loops we need to take into account where the
1835 // wins happen. We need to be careful when splitting, because splitting nodes
1836 // related to the iv through the phi can sufficiently rearrange the loop
1837 // structure to prevent RCE and thus vectorization. Thus, we only deem splitting
1838 // profitable if the win of a split is not on the entry edge, as such wins
1839 // only pay off once and have a high chance of messing up the loop structure.
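      // For example, with policy == 1: two wins that both occur on the
      // backedge are profitable, whereas two wins where one occurs on the
      // entry edge are not, since the backedge wins alone do not exceed the
      // policy.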
1840 return (_loop_entry_wins == 0 && _total_wins > policy) ||
             // If there are wins on the entry edge but the backedge also has sufficient wins,
             // there is sufficient profitability to split regardless of the risk of messing
             // up the loop structure.
             _loop_back_wins > policy ||
             // If the policy is less than 0, a split is always profitable, i.e. we always
             // split. This is needed when we split a node and then must also split a
             // dependent node, e.g. splitting a Bool node after splitting a Cmp node.
1848 policy < 0;
1849 }
1850 };
1851
1852
1853 void split_thru_phi_yank_old_nodes(Node* n, Node* region);
1854
1855 public:
1856
1857 // Conversion of fill/copy patterns into intrinsic versions
1858 bool do_intrinsify_fill();
1859 bool intrinsify_fill(IdealLoopTree* lpt);
1860 bool match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
1861 Node*& shift, Node*& offset);
1862
1863 private:
1864 // Helper functions
1865 Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache );
1866 Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true );
1867 void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true );
1868 bool split_up( Node *n, Node *blk1, Node *blk2 );
1869
1870 Node* place_outside_loop(Node* useblock, IdealLoopTree* loop) const;
1871 Node* try_move_store_before_loop(Node* n, Node *n_ctrl);
1872 void try_move_store_after_loop(Node* n);
1873 bool identical_backtoback_ifs(Node *n);
1874 bool can_split_if(Node *n_ctrl);
1875 bool cannot_split_division(const Node* n, const Node* region) const;
1876 static bool is_divisor_loop_phi(const Node* divisor, const Node* loop);
1877 bool loop_phi_backedge_type_contains_zero(const Node* phi_divisor, const Type* zero) const;
1878
1879 // Determine if a method is too big for a/another round of split-if, based on
1880 // a magic (approximate) ratio derived from the equally magic constant 35000,
1881 // previously used for this purpose (but without relating to the node limit).
1882 bool must_throttle_split_if() {
1883 uint threshold = C->max_node_limit() * 2 / 5;
1884 return C->live_nodes() > threshold;
1885 }
1886
1887 // A simplistic node request tracking mechanism, where
1888 // = UINT_MAX Request not valid or made final.
1889 // < UINT_MAX Nodes currently requested (estimate).
1890 uint _nodes_required;
1891
1892 enum { REQUIRE_MIN = 70 };
1893
1894 uint nodes_required() const { return _nodes_required; }
1895
1896 // Given the _currently_ available number of nodes, check whether there is
1897 // "room" for an additional request or not, considering the already required
  // number of nodes. Return TRUE if the new request exceeds the node budget
  // limit, otherwise return FALSE. Note that this interpretation acts
  // pessimistically on additional requests when new nodes have already been
  // generated since the 'begin'. This behaviour fits with the intention that
1902 // node estimates/requests should be made upfront.
1903 bool exceeding_node_budget(uint required = 0) {
1904 assert(C->live_nodes() < C->max_node_limit(), "sanity");
1905 uint available = C->max_node_limit() - C->live_nodes();
1906 return available < required + _nodes_required + REQUIRE_MIN;
1907 }
1908
1909 uint require_nodes(uint require, uint minreq = REQUIRE_MIN) {
1910 precond(require > 0);
1911 _nodes_required += MAX2(require, minreq);
1912 return _nodes_required;
1913 }
1914
1915 bool may_require_nodes(uint require, uint minreq = REQUIRE_MIN) {
1916 return !exceeding_node_budget(require) && require_nodes(require, minreq) > 0;
1917 }
1918
1919 uint require_nodes_begin() {
1920 assert(_nodes_required == UINT_MAX, "Bad state (begin).");
1921 _nodes_required = 0;
1922 return C->live_nodes();
1923 }
1924
1925 // When a node request is final, optionally check that the requested number
1926 // of nodes was reasonably correct with respect to the number of new nodes
1927 // introduced since the last 'begin'. Always check that we have not exceeded
1928 // the maximum node limit.
1929 void require_nodes_final(uint live_at_begin, bool check_estimate) {
1930 assert(_nodes_required < UINT_MAX, "Bad state (final).");
1931
1932 #ifdef ASSERT
1933 if (check_estimate) {
1934 // Check that the node budget request was not off by too much (x2).
1935 // Should this be the case we _surely_ need to improve the estimates
1936 // used in our budget calculations.
1937 if (C->live_nodes() - live_at_begin > 2 * _nodes_required) {
1938 log_info(compilation)("Bad node estimate: actual = %d >> request = %d",
1939 C->live_nodes() - live_at_begin, _nodes_required);
1940 }
1941 }
1942 #endif
1943 // Assert that we have stayed within the node budget limit.
1944 assert(C->live_nodes() < C->max_node_limit(),
1945 "Exceeding node budget limit: %d + %d > %d (request = %d)",
1946 C->live_nodes() - live_at_begin, live_at_begin,
1947 C->max_node_limit(), _nodes_required);
1948
1949 _nodes_required = UINT_MAX;
1950 }
1951
1952 private:
1953
1954 bool _created_loop_node;
1955 DEBUG_ONLY(void dump_idoms(Node* early, Node* wrong_lca);)
1956 NOT_PRODUCT(void dump_idoms_in_reverse(const Node* n, const Node_List& idom_list) const;)
1957
1958 public:
1959 void set_created_loop_node() { _created_loop_node = true; }
1960 bool created_loop_node() { return _created_loop_node; }
1961 void register_new_node(Node* n, Node* blk);
1962 void register_new_node_with_ctrl_of(Node* new_node, Node* ctrl_of) {
1963 register_new_node(new_node, get_ctrl(ctrl_of));
1964 }
1965
1966 Node* clone_and_register(Node* n, Node* ctrl) {
1967 n = n->clone();
1968 register_new_node(n, ctrl);
1969 return n;
1970 }
1971
1972 #ifdef ASSERT
1973 void dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA);
1974 #endif
1975
1976 #ifndef PRODUCT
1977 void dump() const;
1978 void dump_idom(Node* n) const { dump_idom(n, 1000); } // For debugging
1979 void dump_idom(Node* n, uint count) const;
1980 void get_idoms(Node* n, uint count, Unique_Node_List& idoms) const;
1981 void dump(IdealLoopTree* loop, uint rpo_idx, Node_List &rpo_list) const;
1982 IdealLoopTree* get_loop_idx(Node* n) const {
1983 // Dead nodes have no loop, so return the top level loop instead
1984 return _loop_or_ctrl[n->_idx] ? (IdealLoopTree*)_loop_or_ctrl[n->_idx] : _ltree_root;
1985 }
1986 // Print some stats
1987 static void print_statistics();
1988 static int _loop_invokes; // Count of PhaseIdealLoop invokes
1989 static int _loop_work; // Sum of PhaseIdealLoop x _unique
1990 static volatile int _long_loop_candidates;
1991 static volatile int _long_loop_nests;
1992 #endif
1993
1994 #ifdef ASSERT
1995 void verify() const;
1996 bool verify_idom_and_nodes(Node* root, const PhaseIdealLoop* phase_verify) const;
1997 bool verify_idom(Node* n, const PhaseIdealLoop* phase_verify) const;
1998 bool verify_loop_ctrl(Node* n, const PhaseIdealLoop* phase_verify) const;
1999 #endif
2000
2001 void rpo(Node* start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list) const;
2002
2003 void check_counted_loop_shape(IdealLoopTree* loop, Node* head, BasicType bt) NOT_DEBUG_RETURN;
2004
2005 LoopNode* create_inner_head(IdealLoopTree* loop, BaseCountedLoopNode* head, IfNode* exit_test);
2006
2007
2008 int extract_long_range_checks(const IdealLoopTree* loop, jint stride_con, int iters_limit, PhiNode* phi,
2009 Node_List &range_checks);
2010
2011 void transform_long_range_checks(int stride_con, const Node_List &range_checks, Node* outer_phi,
2012 Node* inner_iters_actual_int, Node* inner_phi,
2013 Node* iv_add, LoopNode* inner_head);
2014
2015 Node* get_late_ctrl_with_anti_dep(LoadNode* n, Node* early, Node* LCA);
2016
2017 bool ctrl_of_use_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop, Node* ctrl);
2018
2019 bool ctrl_of_all_uses_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop);
2020
2021 bool would_sink_below_pre_loop_exit(IdealLoopTree* n_loop, Node* ctrl);
2022
2023 Node* compute_early_ctrl(Node* n, Node* n_ctrl);
2024
2025 void try_sink_out_of_loop(Node* n);
2026
2027 Node* clamp(Node* R, Node* L, Node* H);
2028
2029 bool safe_for_if_replacement(const Node* dom) const;
2030
2031 void push_pinned_nodes_thru_region(IfNode* dom_if, Node* region);
2032
2033 bool try_merge_identical_ifs(Node* n);
2034
2035 void clone_loop_body(const Node_List& body, Node_List &old_new, CloneMap* cm);
2036
2037 void fix_body_edges(const Node_List &body, IdealLoopTree* loop, const Node_List &old_new, int dd,
2038 IdealLoopTree* parent, bool partial);
2039
2040 void fix_ctrl_uses(const Node_List& body, const IdealLoopTree* loop, Node_List &old_new, CloneLoopMode mode,
2041 Node* side_by_side_idom, CloneMap* cm, Node_List &worklist);
2042
2043 void fix_data_uses(Node_List& body, IdealLoopTree* loop, CloneLoopMode mode, IdealLoopTree* outer_loop,
2044 uint new_counter, Node_List& old_new, Node_List& worklist, Node_List*& split_if_set,
2045 Node_List*& split_bool_set, Node_List*& split_cex_set);
2046
2047 void finish_clone_loop(Node_List* split_if_set, Node_List* split_bool_set, Node_List* split_cex_set);
2048
2049 bool at_relevant_ctrl(Node* n, const Node* blk1, const Node* blk2);
2050
2051 bool clone_cmp_loadklass_down(Node* n, const Node* blk1, const Node* blk2);
2052 void clone_loadklass_nodes_at_cmp_index(const Node* n, Node* cmp, int i);
2053 bool clone_cmp_down(Node* n, const Node* blk1, const Node* blk2);
2054 void clone_template_assertion_expression_down(Node* node);
2055
2056 Node* similar_subtype_check(const Node* x, Node* r_in);
2057
2058 void update_addp_chain_base(Node* x, Node* old_base, Node* new_base);
2059
2060 bool can_move_to_inner_loop(Node* n, LoopNode* n_loop, Node* x);
2061
2062 void pin_nodes_dependent_on(Node* ctrl, bool old_iff_is_rangecheck);
2063
2064 Node* ensure_node_and_inputs_are_above_pre_end(CountedLoopEndNode* pre_end, Node* node);
2065
2066 Node* new_assertion_predicate_opaque_init(Node* entry_control, Node* init, Node* int_zero);
2067
2068 bool try_make_short_running_loop(IdealLoopTree* loop, jint stride_con, const Node_List& range_checks, const uint iters_limit);
2069
2070 ConINode* intcon(jint i);
2071
2072 ConLNode* longcon(jlong i);
2073
2074 ConNode* makecon(const Type* t);
2075
2076 ConNode* integercon(jlong l, BasicType bt);
2077
2078 ConNode* zerocon(BasicType bt);
2079 };
2080
2081 class CountedLoopConverter {
2082 friend class PhaseIdealLoop;
2083
2084 // Match increment with optional truncation
2085 class TruncatedIncrement {
2086 bool _is_valid;
2087
2088 BasicType _bt;
2089
2090 Node* _incr;
2091 Node* _outer_trunc;
2092 Node* _inner_trunc;
2093 const TypeInteger* _trunc_type;
2094
2095 public:
2096 TruncatedIncrement(BasicType bt) :
2097 _is_valid(false),
2098 _bt(bt),
2099 _incr(nullptr),
2100 _outer_trunc(nullptr),
2101 _inner_trunc(nullptr),
2102 _trunc_type(nullptr) {}
2103
2104 void build(Node* expr);
2105
2106 bool is_valid() const { return _is_valid; }
2107 Node* incr() const { return _incr; }
2108
    // Optional truncation for: CHAR: (i+1)&0x7fff, BYTE: ((i+1)<<24)>>24, or SHORT: ((i+1)<<16)>>16
2110 Node* outer_trunc() const { return _outer_trunc; } // the outermost truncating node (either the & or the final >>)
2111 Node* inner_trunc() const { return _inner_trunc; } // the inner truncating node, if applicable (the << in a <</>> pair)
2112 const TypeInteger* trunc_type() const { return _trunc_type; }
2113 };
2114
2115 class LoopStructure {
2116 bool _is_valid;
2117
2118 const Node* _head;
2119 const IdealLoopTree* _loop;
2120 PhaseIdealLoop* _phase;
2121 BasicType _iv_bt;
2122
2123 Node* _back_control;
2124 PhaseIdealLoop::LoopExitTest _exit_test;
2125 PhaseIdealLoop::LoopIVIncr _iv_incr;
2126 TruncatedIncrement _truncated_increment;
2127 PhaseIdealLoop::LoopIVStride _stride;
2128 PhiNode* _phi;
2129 SafePointNode* _safepoint;
2130
2131 public:
2132 LoopStructure(const Node* head, const IdealLoopTree* loop, PhaseIdealLoop* phase, const BasicType iv_bt) :
2133 _is_valid(false),
2134 _head(head),
2135 _loop(loop),
2136 _phase(phase),
2137 _iv_bt(iv_bt),
2138 _back_control(_phase->loop_exit_control(_loop)),
2139 _exit_test(_back_control, _loop, _phase),
2140 _iv_incr(_head, _loop),
2141 _truncated_increment(_iv_bt),
      _stride(_iv_bt),
2143 _phi(nullptr),
2144 _safepoint(nullptr) {}
2145
2146 void build();
2147
2148 jlong final_limit_correction() const; // compute adjusted loop limit correction
2149 bool is_infinite_loop() const;
2150
2151 bool is_valid() const { return _is_valid; }
2152
2153 Node* back_control() const { return _back_control; }
2154 PhaseIdealLoop::LoopExitTest& exit_test() { return _exit_test; }
2155 PhaseIdealLoop::LoopIVIncr& iv_incr() { return _iv_incr; }
2156 TruncatedIncrement& truncated_increment() { return _truncated_increment; }
2157 PhaseIdealLoop::LoopIVStride& stride() { return _stride; }
2158 PhiNode* phi() const { return _phi; }
2159 SafePointNode* sfpt() const { return _safepoint; }
2160 jlong stride_con() const { return _stride.compute_non_zero_stride_con(_exit_test.mask(), _iv_bt); }
2161 Node* limit() const { return _exit_test.limit(); }
2162 };
2163
2164 PhaseIdealLoop* const _phase;
2165 Node* const _head;
2166 IdealLoopTree* const _loop;
2167 const BasicType _iv_bt;
2168
2169 LoopStructure _structure;
2170 bool _should_insert_stride_overflow_limit_check = false;
2171 bool _should_insert_init_trip_limit_check = false;
2172
2173 DEBUG_ONLY(bool _checked_for_counted_loop = false;)
2174
2175 // stats for PhaseIdealLoop::print_statistics()
2176 static volatile int _long_loop_counted_loops;
2177
2178 // Return a type based on condition control flow
2179 const TypeInt* filtered_type(Node* n, Node* n_ctrl);
2180 const TypeInt* filtered_type(Node* n) { return filtered_type(n, nullptr); }
2181 // Helpers for filtered type
2182 const TypeInt* filtered_type_from_dominators(Node* val, Node* val_ctrl);
2183
2184 void insert_loop_limit_check_predicate(const ParsePredicateSuccessProj* loop_limit_check_parse_proj, Node* bol) const;
2185 void insert_stride_overflow_limit_check() const;
2186 void insert_init_trip_limit_check() const;
2187 bool has_dominating_loop_limit_check(Node* init_trip, Node* limit, jlong stride_con, BasicType iv_bt,
2188 Node* loop_entry) const;
2189
2190 bool is_iv_overflowing(const TypeInteger* init_t, jlong stride_con, Node* phi_increment, BoolTest::mask mask) const;
2191 bool has_truncation_wrap(const TruncatedIncrement& truncation, Node* phi, jlong stride_con);
2192 SafePointNode* find_safepoint(Node* iftrue);
2193 bool is_safepoint_invalid(SafePointNode* sfpt) const;
2194
2195 public:
2196 CountedLoopConverter(PhaseIdealLoop* phase, Node* head, IdealLoopTree* loop, const BasicType iv_bt)
2197 : _phase(phase),
2198 _head(head),
2199 _loop(loop),
2200 _iv_bt(iv_bt),
      _structure(_head, _loop, _phase, _iv_bt) {
2202 assert(phase != nullptr, "must be"); // Fail early if mandatory parameters are null.
2203 assert(head != nullptr, "must be");
2204 assert(loop != nullptr, "must be");
2205 assert(iv_bt == T_INT || iv_bt == T_LONG, "either int or long loops");
2206 }
2207
2208 bool is_counted_loop();
2209 IdealLoopTree* convert();
2210
2211 DEBUG_ONLY(bool should_stress_long_counted_loop();)
2212 DEBUG_ONLY(bool stress_long_counted_loop();)
2213
2214 enum StrideOverflowState {
2215 Overflow = -1,
2216 NoOverflow = 0,
2217 RequireLimitCheck = 1
2218 };
2219 static StrideOverflowState check_stride_overflow(jlong final_correction, const TypeInteger* limit_t, BasicType bt);
2220 };
2221
2222 class AutoNodeBudget : public StackObj
2223 {
2224 public:
2225 enum budget_check_t { BUDGET_CHECK, NO_BUDGET_CHECK };
2226
2227 AutoNodeBudget(PhaseIdealLoop* phase, budget_check_t chk = BUDGET_CHECK)
2228 : _phase(phase),
2229 _check_at_final(chk == BUDGET_CHECK),
2230 _nodes_at_begin(0)
2231 {
2232 precond(_phase != nullptr);
2233
2234 _nodes_at_begin = _phase->require_nodes_begin();
2235 }
2236
2237 ~AutoNodeBudget() {
2238 #ifndef PRODUCT
2239 if (TraceLoopOpts) {
2240 uint request = _phase->nodes_required();
2241 uint delta = _phase->C->live_nodes() - _nodes_at_begin;
2242
2243 if (request < delta) {
2244 tty->print_cr("Exceeding node budget: %d < %d", request, delta);
2245 } else {
2246 uint const REQUIRE_MIN = PhaseIdealLoop::REQUIRE_MIN;
2247 // Identify the worst estimates as "poor" ones.
2248 if (request > REQUIRE_MIN && delta > 0) {
2249 if ((delta > REQUIRE_MIN && request > 3 * delta) ||
2250 (delta <= REQUIRE_MIN && request > 10 * delta)) {
2251 tty->print_cr("Poor node estimate: %d >> %d", request, delta);
2252 }
2253 }
2254 }
2255 }
2256 #endif // PRODUCT
2257 _phase->require_nodes_final(_nodes_at_begin, _check_at_final);
2258 }
2259
2260 private:
2261 PhaseIdealLoop* _phase;
2262 bool _check_at_final;
2263 uint _nodes_at_begin;
2264 };
2265
2266 inline Node* IdealLoopTree::tail() {
2267 // Handle lazy update of _tail field.
2268 if (_tail->in(0) == nullptr) {
2269 _tail = _phase->get_ctrl(_tail);
2270 }
2271 return _tail;
2272 }
2273
2274 inline Node* IdealLoopTree::head() {
2275 // Handle lazy update of _head field.
2276 if (_head->in(0) == nullptr) {
2277 _head = _phase->get_ctrl(_head);
2278 }
2279 return _head;
2280 }
2281
2282 // Iterate over the loop tree using a preorder, left-to-right traversal.
2283 //
2284 // Example that visits all counted loops from within PhaseIdealLoop
2285 //
// for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
//   IdealLoopTree* lpt = iter.current();
//   if (!lpt->is_counted()) continue;
//   ...
// }
2290 class LoopTreeIterator : public StackObj {
2291 private:
2292 IdealLoopTree* _root;
2293 IdealLoopTree* _curnt;
2294
2295 public:
2296 LoopTreeIterator(IdealLoopTree* root) : _root(root), _curnt(root) {}
2297
2298 bool done() { return _curnt == nullptr; } // Finished iterating?
2299
2300 void next(); // Advance to next loop tree
2301
2302 IdealLoopTree* current() { return _curnt; } // Return current value of iterator.
2303 };
2304
2305 // Compute probability of reaching some CFG node from a fixed
2306 // dominating CFG node
2307 class PathFrequency {
2308 private:
2309 Node* _dom; // frequencies are computed relative to this node
2310 Node_Stack _stack;
2311 GrowableArray<float> _freqs_stack; // keep track of intermediate result at regions
2312 GrowableArray<float> _freqs; // cache frequencies
2313 PhaseIdealLoop* _phase;
2314
2315 float check_and_truncate_frequency(float f) {
2316 assert(f >= 0, "Incorrect frequency");
    // We do not perform an exact (f <= 1) check, since that
    // would be error-prone with the rounding of floats.
2319 // Performing a check like (f <= 1+eps) would be of benefit,
2320 // however, it is not evident how to determine such an eps,
2321 // given that an arbitrary number of add/mul operations
2322 // are performed on these frequencies.
2323 return (f > 1) ? 1 : f;
2324 }
2325
2326 public:
2327 PathFrequency(Node* dom, PhaseIdealLoop* phase)
2328 : _dom(dom), _stack(0), _phase(phase) {
2329 }
2330
2331 float to(Node* n);
2332 };
2333
// Class to clone a data node graph from a list of data nodes. This is done in 2 steps:
// 1. Clone the data nodes.
// 2. Rewire the inputs of the cloned nodes that still point to old nodes to the corresponding clones, using an old->new mapping.
2337 class DataNodeGraph : public StackObj {
2338 PhaseIdealLoop* const _phase;
2339 const Unique_Node_List& _data_nodes;
2340 OrigToNewHashtable _orig_to_new;
2341
2342 public:
2343 DataNodeGraph(const Unique_Node_List& data_nodes, PhaseIdealLoop* phase)
2344 : _phase(phase),
2345 _data_nodes(data_nodes),
      // Use 107 as best guess which is the first resize value in ResizeableResourceHashtable::large_table_sizes.
2347 _orig_to_new(107, MaxNodeLimit)
2348 {
2349 #ifdef ASSERT
2350 for (uint i = 0; i < data_nodes.size(); i++) {
2351 assert(!data_nodes[i]->is_CFG(), "only data nodes");
2352 }
2353 #endif
2354 }
2355 NONCOPYABLE(DataNodeGraph);
2356
2357 private:
2358 void clone(Node* node, Node* new_ctrl);
2359 void clone_data_nodes(Node* new_ctrl);
2360 void clone_data_nodes_and_transform_opaque_loop_nodes(const TransformStrategyForOpaqueLoopNodes& transform_strategy,
2361 Node* new_ctrl);
2362 void rewire_clones_to_cloned_inputs();
2363 void transform_opaque_node(const TransformStrategyForOpaqueLoopNodes& transform_strategy, Node* node);
2364
2365 public:
2366 // Clone the provided data node collection and rewire the clones in such a way to create an identical graph copy.
2367 // Set 'new_ctrl' as ctrl for the cloned nodes.
2368 const OrigToNewHashtable& clone(Node* new_ctrl) {
2369 assert(_orig_to_new.number_of_entries() == 0, "should not call this method twice in a row");
2370 clone_data_nodes(new_ctrl);
2371 rewire_clones_to_cloned_inputs();
2372 return _orig_to_new;
2373 }
2374
2375 // Create a copy of the data nodes provided to the constructor by doing the following:
2376 // Clone all non-OpaqueLoop* nodes and rewire them to create an identical subgraph copy. For the OpaqueLoop* nodes,
2377 // apply the provided transformation strategy and include the transformed node into the subgraph copy to get a complete
2378 // "cloned-and-transformed" graph copy. For all newly cloned nodes (which could also be new OpaqueLoop* nodes), set
2379 // `new_ctrl` as ctrl.
2380 const OrigToNewHashtable& clone_with_opaque_loop_transform_strategy(
2381 const TransformStrategyForOpaqueLoopNodes& transform_strategy,
2382 Node* new_ctrl) {
2383 clone_data_nodes_and_transform_opaque_loop_nodes(transform_strategy, new_ctrl);
2384 rewire_clones_to_cloned_inputs();
2385 return _orig_to_new;
2386 }
2387 };
2388 #endif // SHARE_OPTO_LOOPNODE_HPP