1 /*
2 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciMethodData.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "gc/shared/barrierSet.hpp"
28 #include "gc/shared/c2/barrierSetC2.hpp"
29 #include "libadt/vectset.hpp"
30 #include "memory/allocation.inline.hpp"
31 #include "memory/resourceArea.hpp"
32 #include "opto/addnode.hpp"
33 #include "opto/arraycopynode.hpp"
34 #include "opto/c2_globals.hpp"
35 #include "opto/callnode.hpp"
36 #include "opto/castnode.hpp"
37 #include "opto/connode.hpp"
38 #include "opto/convertnode.hpp"
39 #include "opto/divnode.hpp"
40 #include "opto/idealGraphPrinter.hpp"
41 #include "opto/loopnode.hpp"
42 #include "opto/movenode.hpp"
43 #include "opto/mulnode.hpp"
44 #include "opto/opaquenode.hpp"
45 #include "opto/opcodes.hpp"
46 #include "opto/predicates.hpp"
47 #include "opto/rootnode.hpp"
48 #include "opto/runtime.hpp"
49 #include "opto/vectorization.hpp"
50 #include "runtime/sharedRuntime.hpp"
51 #include "utilities/checkedCast.hpp"
52 #include "utilities/powerOfTwo.hpp"
53
54 //=============================================================================
55 //--------------------------is_cloop_ind_var-----------------------------------
56 // Determine if a node is a counted loop induction variable.
57 // NOTE: The method is declared in "node.hpp".
58 bool Node::is_cloop_ind_var() const {
59 return (is_Phi() &&
60 as_Phi()->region()->is_CountedLoop() &&
61 as_Phi()->region()->as_CountedLoop()->phi() == this);
62 }
63
64 //=============================================================================
65 //------------------------------dump_spec--------------------------------------
66 // Dump special per-node info
67 #ifndef PRODUCT
68 void LoopNode::dump_spec(outputStream *st) const {
69 RegionNode::dump_spec(st);
70 if (is_inner_loop()) st->print( "inner " );
71 if (is_partial_peel_loop()) st->print( "partial_peel " );
72 if (partial_peel_has_failed()) st->print( "partial_peel_failed " );
73 }
74 #endif
75
76 //------------------------------is_valid_counted_loop-------------------------
77 bool LoopNode::is_valid_counted_loop(BasicType bt) const {
78 if (is_BaseCountedLoop() && as_BaseCountedLoop()->bt() == bt) {
79 BaseCountedLoopNode* l = as_BaseCountedLoop();
80 BaseCountedLoopEndNode* le = l->loopexit_or_null();
81 if (le != nullptr &&
82 le->proj_out_or_null(1 /* true */) == l->in(LoopNode::LoopBackControl)) {
83 Node* phi = l->phi();
84 Node* exit = le->proj_out_or_null(0 /* false */);
85 if (exit != nullptr && exit->Opcode() == Op_IfFalse &&
86 phi != nullptr && phi->is_Phi() &&
87 phi->in(LoopNode::LoopBackControl) == l->incr() &&
88 le->loopnode() == l && le->stride_is_con()) {
89 return true;
90 }
91 }
92 }
93 return false;
94 }
95
96 //------------------------------get_early_ctrl---------------------------------
97 // Compute earliest legal control
Node *PhaseIdealLoop::get_early_ctrl( Node *n ) {
  assert( !n->is_Phi() && !n->is_CFG(), "this code only handles data nodes" );
  uint i;
  Node *early;
  // Start from the control input if present (for non-expensive nodes; an
  // expensive node's in(0) is handled separately below), otherwise from the
  // control of the first data input.
  if (n->in(0) && !n->is_expensive()) {
    early = n->in(0);
    if (!early->is_CFG()) // Might be a non-CFG multi-def
      early = get_ctrl(early); // So treat input as a straight data input
    i = 1;
  } else {
    early = get_ctrl(n->in(1));
    i = 2;
  }
  uint e_d = dom_depth(early); // Cache dom_depth(early)
  assert( early, "" );
  // The earliest legal control is the deepest (most dominated) of the
  // controls of all inputs: it must be dominated by each of them.
  for (; i < n->req(); i++) {
    Node *cin = get_ctrl(n->in(i));
    assert( cin, "" );
    // Keep deepest dominator depth
    uint c_d = dom_depth(cin);
    if (c_d > e_d) { // Deeper guy?
      early = cin; // Keep deepest found so far
      e_d = c_d;
    } else if (c_d == e_d && // Same depth?
               early != cin) { // If not equal, must use slower algorithm
      // If same depth but not equal, one _must_ dominate the other
      // and we want the deeper (i.e., dominated) guy.
      Node *n1 = early;
      Node *n2 = cin;
      // Walk both candidates up the dominator tree in lock-step until one
      // reaches the other, or provably passes above it (depth drops below
      // c_d), which decides who dominates whom.
      while (1) {
        n1 = idom(n1); // Walk up until break cycle
        n2 = idom(n2);
        if (n1 == cin || // Walked early up to cin
            dom_depth(n2) < c_d)
          break; // early is deeper; keep him
        if (n2 == early || // Walked cin up to early
            dom_depth(n1) < c_d) {
          early = cin; // cin is deeper; keep him
          break;
        }
      }
      e_d = dom_depth(early); // Reset depth register cache
    }
  }

  // Return earliest legal location
  assert(early == find_non_split_ctrl(early), "unexpected early control");

  // Expensive nodes get a second pass that hoists them only as far up as is
  // still beneficial (skipped during verification passes).
  if (n->is_expensive() && !_verify_only && !_verify_me) {
    assert(n->in(0), "should have control input");
    early = get_early_ctrl_for_expensive(n, early);
  }

  return early;
}
153
154 //------------------------------get_early_ctrl_for_expensive---------------------------------
155 // Move node up the dominator tree as high as legal while still beneficial
Node *PhaseIdealLoop::get_early_ctrl_for_expensive(Node *n, Node* earliest) {
  assert(n->in(0) && n->is_expensive(), "expensive node with control input here");
  assert(OptimizeExpensiveOps, "optimization off?");

  Node* ctl = n->in(0);
  assert(ctl->is_CFG(), "expensive input 0 must be cfg");
  // Never hoist above 'earliest': that is the earliest legal placement
  // computed from the node's data inputs.
  uint min_dom_depth = dom_depth(earliest);
#ifdef ASSERT
  if (!is_dominator(ctl, earliest) && !is_dominator(earliest, ctl)) {
    dump_bad_graph("Bad graph detected in get_early_ctrl_for_expensive", n, earliest, ctl);
    assert(false, "Bad graph detected in get_early_ctrl_for_expensive");
  }
#endif
  if (dom_depth(ctl) < min_dom_depth) {
    return earliest;
  }

  // Walk 'ctl' up the dominator tree, one CFG node at a time, as long as the
  // step keeps the node on the same code path (see the per-case comments).
  while (true) {
    Node* next = ctl;
    // Moving the node out of a loop on the projection of an If
    // confuses Loop Predication. So, once we hit a loop in an If branch
    // that doesn't branch to an UNC, we stop. The code that processes
    // expensive nodes will notice the loop and skip over it to try to
    // move the node further up.
    if (ctl->is_CountedLoop() && ctl->in(1) != nullptr && ctl->in(1)->in(0) != nullptr && ctl->in(1)->in(0)->is_If()) {
      if (!ctl->in(1)->as_Proj()->is_uncommon_trap_if_pattern()) {
        break;
      }
      // Loop entered through an UNC-guarded If: jump above the whole test.
      next = idom(ctl->in(1)->in(0));
    } else if (ctl->is_Proj()) {
      // We only move it up along a projection if the projection is
      // the single control projection for its parent: same code path,
      // if it's a If with UNC or fallthrough of a call.
      Node* parent_ctl = ctl->in(0);
      if (parent_ctl == nullptr) {
        break;
      } else if (parent_ctl->is_CountedLoopEnd() && parent_ctl->as_CountedLoopEnd()->loopnode() != nullptr) {
        // Exiting a counted loop: continue above the loop entry.
        next = parent_ctl->as_CountedLoopEnd()->loopnode()->init_control();
      } else if (parent_ctl->is_If()) {
        if (!ctl->as_Proj()->is_uncommon_trap_if_pattern()) {
          break;
        }
        assert(idom(ctl) == parent_ctl, "strange");
        next = idom(parent_ctl);
      } else if (ctl->is_CatchProj()) {
        // Only the fall-through path of a call keeps the same code path.
        if (ctl->as_Proj()->_con != CatchProjNode::fall_through_index) {
          break;
        }
        assert(parent_ctl->in(0)->in(0)->is_Call(), "strange graph");
        next = parent_ctl->in(0)->in(0)->in(0);
      } else {
        // Check if parent control has a single projection (this
        // control is the only possible successor of the parent
        // control). If so, we can try to move the node above the
        // parent control.
        int nb_ctl_proj = 0;
        for (DUIterator_Fast imax, i = parent_ctl->fast_outs(imax); i < imax; i++) {
          Node *p = parent_ctl->fast_out(i);
          if (p->is_Proj() && p->is_CFG()) {
            nb_ctl_proj++;
            if (nb_ctl_proj > 1) {
              break;
            }
          }
        }

        if (nb_ctl_proj > 1) {
          break;
        }
        assert(parent_ctl->is_Start() || parent_ctl->is_MemBar() || parent_ctl->is_Call() ||
               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(parent_ctl), "unexpected node");
        assert(idom(ctl) == parent_ctl, "strange");
        next = idom(parent_ctl);
      }
    } else {
      next = idom(ctl);
    }
    // Stop at the graph root/start or once we would go above 'earliest'.
    if (next->is_Root() || next->is_Start() || dom_depth(next) < min_dom_depth) {
      break;
    }
    ctl = next;
  }

  // Rewire the node's control if we found a higher placement. Re-hash since
  // changing an input changes the node's hash.
  if (ctl != n->in(0)) {
    _igvn.replace_input_of(n, 0, ctl);
    _igvn.hash_insert(n);
  }

  return ctl;
}
246
247
248 //------------------------------set_early_ctrl---------------------------------
249 // Set earliest legal control
250 void PhaseIdealLoop::set_early_ctrl(Node* n, bool update_body) {
251 Node *early = get_early_ctrl(n);
252
253 // Record earliest legal location
254 set_ctrl(n, early);
255 IdealLoopTree *loop = get_loop(early);
256 if (update_body && loop->_child == nullptr) {
257 loop->_body.push(n);
258 }
259 }
260
261 //------------------------------set_subtree_ctrl-------------------------------
262 // set missing _ctrl entries on new nodes
263 void PhaseIdealLoop::set_subtree_ctrl(Node* n, bool update_body) {
264 // Already set? Get out.
265 if (_loop_or_ctrl[n->_idx]) return;
266 // Recursively set _loop_or_ctrl array to indicate where the Node goes
267 uint i;
268 for (i = 0; i < n->req(); ++i) {
269 Node *m = n->in(i);
270 if (m && m != C->root()) {
271 set_subtree_ctrl(m, update_body);
272 }
273 }
274
275 // Fixup self
276 set_early_ctrl(n, update_body);
277 }
278
279 IdealLoopTree* PhaseIdealLoop::insert_outer_loop(IdealLoopTree* loop, LoopNode* outer_l, Node* outer_ift) {
280 IdealLoopTree* outer_ilt = new IdealLoopTree(this, outer_l, outer_ift);
281 IdealLoopTree* parent = loop->_parent;
282 IdealLoopTree* sibling = parent->_child;
283 if (sibling == loop) {
284 parent->_child = outer_ilt;
285 } else {
286 while (sibling->_next != loop) {
287 sibling = sibling->_next;
288 }
289 sibling->_next = outer_ilt;
290 }
291 outer_ilt->_next = loop->_next;
292 outer_ilt->_parent = parent;
293 outer_ilt->_child = loop;
294 outer_ilt->_nest = loop->_nest;
295 loop->_parent = outer_ilt;
296 loop->_next = nullptr;
297 loop->_nest++;
298 assert(loop->_nest <= SHRT_MAX, "sanity");
299 return outer_ilt;
300 }
301
302 // Create a skeleton strip mined outer loop: an OuterStripMinedLoop head before the inner strip mined CountedLoop, a
303 // SafePoint on exit of the inner CountedLoopEnd and an OuterStripMinedLoopEnd test that can't constant fold until loop
304 // optimizations are over. The inner strip mined loop is left as it is. Only once loop optimizations are over, do we
305 // adjust the inner loop exit condition to limit its number of iterations, set the outer loop exit condition and add
306 // Phis to the outer loop head. Some loop optimizations that operate on the inner strip mined loop need to be aware of
307 // the outer strip mined loop: loop unswitching needs to clone the outer loop as well as the inner, unrolling needs to
308 // only clone the inner loop etc. No optimizations need to change the outer strip mined loop as it is only a skeleton.
309 //
310 // Schematically:
311 //
312 // OuterStripMinedLoop -------|
313 // | |
314 // CountedLoop ----------- | |
315 // \- Phi (iv) -| | |
316 // / \ | | |
317 // CmpI AddI --| | |
318 // \ | |
319 // Bool | |
320 // \ | |
321 // CountedLoopEnd | |
322 // / \ | |
323 // IfFalse IfTrue--------| |
324 // | |
325 // SafePoint |
326 // | |
327 // OuterStripMinedLoopEnd |
328 // / \ |
329 // IfFalse IfTrue-----------|
330 // |
331 //
332 //
333 // As loop optimizations transform the inner loop, the outer strip mined loop stays mostly unchanged. The only exception
334 // is nodes referenced from the SafePoint and sunk from the inner loop: they end up in the outer strip mined loop.
335 //
336 // Not adding Phis to the outer loop head from the beginning, and only adding them after loop optimizations does not
337 // conform to C2's IR rules: any variable or memory slice that is mutated in a loop should have a Phi. The main
338 // motivation for such a design that doesn't conform to C2's IR rules is to allow existing loop optimizations to be
339 // mostly unaffected by the outer strip mined loop: the only extra step needed in most cases is to step over the
340 // OuterStripMinedLoop. The main drawback is that once loop optimizations are over, an extra step is needed to finish
341 // constructing the outer loop. This is handled by OuterStripMinedLoopNode::adjust_strip_mined_loop().
342 //
343 // Adding Phis to the outer loop is largely straightforward: there needs to be one Phi in the outer loop for every Phi
344 // in the inner loop. Things may be more complicated for sunk Store nodes: there may not be any inner loop Phi left
345 // after sinking for a particular memory slice but the outer loop needs a Phi. See
346 // OuterStripMinedLoopNode::handle_sunk_stores_when_finishing_construction()
IdealLoopTree* PhaseIdealLoop::create_outer_strip_mined_loop(Node* init_control,
                                                             IdealLoopTree* loop, float cl_prob, float le_fcnt,
                                                             Node*& entry_control, Node*& iffalse) {
  // Placeholder exit condition for the skeleton outer loop (constant 0).
  Node* outer_test = intcon(0);
  // Clone the inner loop's exit projection: the clone becomes the inner
  // exit feeding the outer loop end, while the original ('orig') is rewired
  // below to be the outer loop's exit projection.
  Node *orig = iffalse;
  iffalse = iffalse->clone();
  _igvn.register_new_node_with_optimizer(iffalse);
  set_idom(iffalse, idom(orig), dom_depth(orig));

  IfNode *outer_le = new OuterStripMinedLoopEndNode(iffalse, outer_test, cl_prob, le_fcnt);
  Node *outer_ift = new IfTrueNode (outer_le);
  Node* outer_iff = orig;
  _igvn.replace_input_of(outer_iff, 0, outer_le);

  // New outer loop head: entry comes from 'init_control', backedge from the
  // outer loop end's true projection. Callers continue from 'entry_control'.
  LoopNode *outer_l = new OuterStripMinedLoopNode(C, init_control, outer_ift);
  entry_control = outer_l;

  // Splice a matching IdealLoopTree node around 'loop'.
  IdealLoopTree* outer_ilt = insert_outer_loop(loop, outer_l, outer_ift);

  set_loop(iffalse, outer_ilt);
  // When this code runs, loop bodies have not yet been populated.
  const bool body_populated = false;
  register_control(outer_le, outer_ilt, iffalse, body_populated);
  register_control(outer_ift, outer_ilt, outer_le, body_populated);
  set_idom(outer_iff, outer_le, dom_depth(outer_le));
  _igvn.register_new_node_with_optimizer(outer_l);
  set_loop(outer_l, outer_ilt);
  set_idom(outer_l, init_control, dom_depth(init_control)+1);

  return outer_ilt;
}
378
// Materialize a loop limit check: create a new If off the Loop Limit Check
// Parse Predicate and install 'bol' (built from 'cmp_limit') as its
// condition; the failing path goes to an uncommon trap.
void PhaseIdealLoop::insert_loop_limit_check_predicate(ParsePredicateSuccessProj* loop_limit_check_parse_proj,
                                                       Node* cmp_limit, Node* bol) {
  assert(loop_limit_check_parse_proj->in(0)->is_ParsePredicate(), "must be parse predicate");
  Node* new_predicate_proj = create_new_if_for_predicate(loop_limit_check_parse_proj, nullptr,
                                                         Deoptimization::Reason_loop_limit_check,
                                                         Op_If);
  Node* iff = new_predicate_proj->in(0);
  // 'cmp_limit' and 'bol' are freshly made nodes: register them and set
  // their controls before wiring the condition into the new If.
  cmp_limit = _igvn.register_new_node_with_optimizer(cmp_limit);
  bol = _igvn.register_new_node_with_optimizer(bol);
  set_subtree_ctrl(bol, false);
  _igvn.replace_input_of(iff, 1, bol);

#ifndef PRODUCT
  // report that the loop predication has been actually performed
  // for this loop
  if (TraceLoopLimitCheck) {
    tty->print_cr("Counted Loop Limit Check generated:");
    DEBUG_ONLY( bol->dump(2); )
  }
#endif
}
400
401 Node* PhaseIdealLoop::loop_exit_control(Node* x, IdealLoopTree* loop) {
402 // Counted loop head must be a good RegionNode with only 3 not null
403 // control input edges: Self, Entry, LoopBack.
404 if (x->in(LoopNode::Self) == nullptr || x->req() != 3 || loop->_irreducible) {
405 return nullptr;
406 }
407 Node *init_control = x->in(LoopNode::EntryControl);
408 Node *back_control = x->in(LoopNode::LoopBackControl);
409 if (init_control == nullptr || back_control == nullptr) { // Partially dead
410 return nullptr;
411 }
412 // Must also check for TOP when looking for a dead loop
413 if (init_control->is_top() || back_control->is_top()) {
414 return nullptr;
415 }
416
417 // Allow funny placement of Safepoint
418 if (back_control->Opcode() == Op_SafePoint) {
419 back_control = back_control->in(TypeFunc::Control);
420 }
421
422 // Controlling test for loop
423 Node *iftrue = back_control;
424 uint iftrue_op = iftrue->Opcode();
425 if (iftrue_op != Op_IfTrue &&
426 iftrue_op != Op_IfFalse) {
427 // I have a weird back-control. Probably the loop-exit test is in
428 // the middle of the loop and I am looking at some trailing control-flow
429 // merge point. To fix this I would have to partially peel the loop.
430 return nullptr; // Obscure back-control
431 }
432
433 // Get boolean guarding loop-back test
434 Node *iff = iftrue->in(0);
435 if (get_loop(iff) != loop || !iff->in(1)->is_Bool()) {
436 return nullptr;
437 }
438 return iftrue;
439 }
440
// Extract the components of the loop exit test reached through projection
// 'back_control'. Out-params: 'incr' (trip-counter side of the compare),
// 'limit' (loop-invariant side), 'bt' (test mask normalized to the backedge
// direction) and 'cl_prob' (probability of taking the backedge). Returns the
// Cmp node, or null if the shape does not match a simple counted-loop test.
Node* PhaseIdealLoop::loop_exit_test(Node* back_control, IdealLoopTree* loop, Node*& incr, Node*& limit, BoolTest::mask& bt, float& cl_prob) {
  Node* iftrue = back_control;
  uint iftrue_op = iftrue->Opcode();
  Node* iff = iftrue->in(0);
  BoolNode* test = iff->in(1)->as_Bool();
  bt = test->_test._test;
  cl_prob = iff->as_If()->_prob;
  if (iftrue_op == Op_IfFalse) {
    // Backedge is the false projection: flip test and probability so both
    // are expressed relative to staying in the loop.
    bt = BoolTest(bt).negate();
    cl_prob = 1.0 - cl_prob;
  }
  // Get backedge compare
  Node* cmp = test->in(1);
  if (!cmp->is_Cmp()) {
    return nullptr;
  }

  // Find the trip-counter increment & limit. Limit must be loop invariant.
  incr = cmp->in(1);
  limit = cmp->in(2);

  // ---------
  // need 'loop()' test to tell if limit is loop invariant
  // ---------

  if (!is_member(loop, get_ctrl(incr))) { // Swapped trip counter and limit?
    Node* tmp = incr;            // Then reverse order into the CmpI
    incr = limit;
    limit = tmp;
    bt = BoolTest(bt).commute(); // And commute the exit test
  }
  if (is_member(loop, get_ctrl(limit))) { // Limit must be loop-invariant
    return nullptr;
  }
  if (!is_member(loop, get_ctrl(incr))) { // Trip counter must be loop-variant
    return nullptr;
  }
  return cmp;
}
480
481 Node* PhaseIdealLoop::loop_iv_incr(Node* incr, Node* x, IdealLoopTree* loop, Node*& phi_incr) {
482 if (incr->is_Phi()) {
483 if (incr->as_Phi()->region() != x || incr->req() != 3) {
484 return nullptr; // Not simple trip counter expression
485 }
486 phi_incr = incr;
487 incr = phi_incr->in(LoopNode::LoopBackControl); // Assume incr is on backedge of Phi
488 if (!is_member(loop, get_ctrl(incr))) { // Trip counter must be loop-variant
489 return nullptr;
490 }
491 }
492 return incr;
493 }
494
495 Node* PhaseIdealLoop::loop_iv_stride(Node* incr, Node*& xphi) {
496 assert(incr->Opcode() == Op_AddI || incr->Opcode() == Op_AddL, "caller resp.");
497 // Get merge point
498 xphi = incr->in(1);
499 Node *stride = incr->in(2);
500 if (!stride->is_Con()) { // Oops, swap these
501 if (!xphi->is_Con()) { // Is the other guy a constant?
502 return nullptr; // Nope, unknown stride, bail out
503 }
504 Node *tmp = xphi; // 'incr' is commutative, so ok to swap
505 xphi = stride;
506 stride = tmp;
507 }
508 return stride;
509 }
510
511 PhiNode* PhaseIdealLoop::loop_iv_phi(Node* xphi, Node* phi_incr, Node* x) {
512 if (!xphi->is_Phi()) {
513 return nullptr; // Too much math on the trip counter
514 }
515 if (phi_incr != nullptr && phi_incr != xphi) {
516 return nullptr;
517 }
518 PhiNode *phi = xphi->as_Phi();
519
520 // Phi must be of loop header; backedge must wrap to increment
521 if (phi->region() != x) {
522 return nullptr;
523 }
524 return phi;
525 }
526
527 static int check_stride_overflow(jlong final_correction, const TypeInteger* limit_t, BasicType bt) {
528 if (final_correction > 0) {
529 if (limit_t->lo_as_long() > (max_signed_integer(bt) - final_correction)) {
530 return -1;
531 }
532 if (limit_t->hi_as_long() > (max_signed_integer(bt) - final_correction)) {
533 return 1;
534 }
535 } else {
536 if (limit_t->hi_as_long() < (min_signed_integer(bt) - final_correction)) {
537 return -1;
538 }
539 if (limit_t->lo_as_long() < (min_signed_integer(bt) - final_correction)) {
540 return 1;
541 }
542 }
543 return 0;
544 }
545
546 static bool condition_stride_ok(BoolTest::mask bt, jlong stride_con) {
547 // If the condition is inverted and we will be rolling
548 // through MININT to MAXINT, then bail out.
549 if (bt == BoolTest::eq || // Bail out, but this loop trips at most twice!
550 // Odd stride
551 (bt == BoolTest::ne && stride_con != 1 && stride_con != -1) ||
552 // Count down loop rolls through MAXINT
553 ((bt == BoolTest::le || bt == BoolTest::lt) && stride_con < 0) ||
554 // Count up loop rolls through MININT
555 ((bt == BoolTest::ge || bt == BoolTest::gt) && stride_con > 0)) {
556 return false; // Bail out
557 }
558 return true;
559 }
560
// Replace every use of the original induction variable 'iv_to_replace' with
// 'outer_phi + inner_iv', the value reconstructed from the new loop nest.
// For a long loop (bt == T_LONG) the int inner iv is widened with ConvI2L
// first. Returns the replacement Add node.
Node* PhaseIdealLoop::loop_nest_replace_iv(Node* iv_to_replace, Node* inner_iv, Node* outer_phi, Node* inner_head,
                                           BasicType bt) {
  Node* iv_as_long;
  if (bt == T_LONG) {
    iv_as_long = new ConvI2LNode(inner_iv, TypeLong::INT);
    register_new_node(iv_as_long, inner_head);
  } else {
    iv_as_long = inner_iv;
  }
  Node* iv_replacement = AddNode::make(outer_phi, iv_as_long, bt);
  register_new_node(iv_replacement, inner_head);
  // Walk the uses backward: replace_edge() removes entries from the out
  // list, so we adjust the iterator by the number of edges replaced.
  for (DUIterator_Last imin, i = iv_to_replace->last_outs(imin); i >= imin;) {
    Node* u = iv_to_replace->last_out(i);
#ifdef ASSERT
    // Every use should be dominated by the inner loop head; the exception is
    // a Phi, whose use is only live along its matching control input, which
    // must itself be dominated by the loop head.
    if (!is_dominator(inner_head, ctrl_or_self(u))) {
      assert(u->is_Phi(), "should be a Phi");
      for (uint j = 1; j < u->req(); j++) {
        if (u->in(j) == iv_to_replace) {
          assert(is_dominator(inner_head, u->in(0)->in(j)), "iv use above loop?");
        }
      }
    }
#endif
    _igvn.rehash_node_delayed(u);
    int nb = u->replace_edge(iv_to_replace, iv_replacement, &_igvn);
    i -= nb;
  }
  return iv_replacement;
}
590
591 // Add a Parse Predicate with an uncommon trap on the failing/false path. Normal control will continue on the true path.
void PhaseIdealLoop::add_parse_predicate(Deoptimization::DeoptReason reason, Node* inner_head, IdealLoopTree* loop,
                                         SafePointNode* sfpt) {
  // Skip when this trap reason has already fired too often: adding the
  // predicate again would just keep deoptimizing.
  if (!C->too_many_traps(reason)) {
    // Insert the Parse Predicate right above the loop entry. Normal control
    // continues on the true projection; the false projection traps.
    ParsePredicateNode* parse_predicate = new ParsePredicateNode(inner_head->in(LoopNode::EntryControl), reason, &_igvn);
    register_control(parse_predicate, loop, inner_head->in(LoopNode::EntryControl));
    Node* if_false = new IfFalseNode(parse_predicate);
    register_control(if_false, _ltree_root, parse_predicate);
    Node* if_true = new IfTrueNode(parse_predicate);
    register_control(if_true, loop, parse_predicate);

    // Build the uncommon trap call taken on the false path.
    int trap_request = Deoptimization::make_trap_request(reason, Deoptimization::Action_maybe_recompile);
    address call_addr = OptoRuntime::uncommon_trap_blob()->entry_point();
    const TypePtr* no_memory_effects = nullptr;
    CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap",
                                           no_memory_effects);

    // Memory and I/O state for the trap come from 'sfpt': from its
    // projections when it is a call, from its direct inputs otherwise.
    Node* mem = nullptr;
    Node* i_o = nullptr;
    if (sfpt->is_Call()) {
      mem = sfpt->proj_out(TypeFunc::Memory);
      i_o = sfpt->proj_out(TypeFunc::I_O);
    } else {
      mem = sfpt->memory();
      i_o = sfpt->i_o();
    }

    Node *frame = new ParmNode(C->start(), TypeFunc::FramePtr);
    register_new_node(frame, C->start());
    Node *ret = new ParmNode(C->start(), TypeFunc::ReturnAdr);
    register_new_node(ret, C->start());

    unc->init_req(TypeFunc::Control, if_false);
    unc->init_req(TypeFunc::I_O, i_o);
    unc->init_req(TypeFunc::Memory, mem); // may gc ptrs
    unc->init_req(TypeFunc::FramePtr, frame);
    unc->init_req(TypeFunc::ReturnAdr, ret);
    unc->init_req(TypeFunc::Parms+0, _igvn.intcon(trap_request));
    unc->set_cnt(PROB_UNLIKELY_MAG(4));
    // Reuse the safepoint's JVM state as the trap's debug info.
    unc->copy_call_debug_info(&_igvn, sfpt);

    for (uint i = TypeFunc::Parms; i < unc->req(); i++) {
      set_subtree_ctrl(unc->in(i), false);
    }
    register_control(unc, _ltree_root, if_false);

    // The trap never returns: terminate its control projection with a Halt
    // hanging off the root.
    Node* ctrl = new ProjNode(unc, TypeFunc::Control);
    register_control(ctrl, _ltree_root, unc);
    Node* halt = new HaltNode(ctrl, frame, "uncommon trap returned which should never happen" PRODUCT_ONLY(COMMA /*reachable*/false));
    register_control(halt, _ltree_root, ctrl);
    _igvn.add_input_to(C->root(), halt);

    // Loop entry now flows through the predicate's true projection.
    _igvn.replace_input_of(inner_head, LoopNode::EntryControl, if_true);
    set_idom(inner_head, if_true, dom_depth(inner_head));
  }
}
647
648 // Find a safepoint node that dominates the back edge. We need a
649 // SafePointNode so we can use its jvm state to create empty
650 // predicates.
651 static bool no_side_effect_since_safepoint(Compile* C, Node* x, Node* mem, MergeMemNode* mm, PhaseIdealLoop* phase) {
652 SafePointNode* safepoint = nullptr;
653 for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
654 Node* u = x->fast_out(i);
655 if (u->is_memory_phi()) {
656 Node* m = u->in(LoopNode::LoopBackControl);
657 if (u->adr_type() == TypePtr::BOTTOM) {
658 if (m->is_MergeMem() && mem->is_MergeMem()) {
659 if (m != mem DEBUG_ONLY(|| true)) {
660 // MergeMemStream can modify m, for example to adjust the length to mem.
661 // This is unfortunate, and probably unnecessary. But as it is, we need
662 // to add m to the igvn worklist, else we may have a modified node that
663 // is not on the igvn worklist.
664 phase->igvn()._worklist.push(m);
665 for (MergeMemStream mms(m->as_MergeMem(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
666 if (!mms.is_empty()) {
667 if (mms.memory() != mms.memory2()) {
668 return false;
669 }
670 #ifdef ASSERT
671 if (mms.alias_idx() != Compile::AliasIdxBot) {
672 mm->set_memory_at(mms.alias_idx(), mem->as_MergeMem()->base_memory());
673 }
674 #endif
675 }
676 }
677 }
678 } else if (mem->is_MergeMem()) {
679 if (m != mem->as_MergeMem()->base_memory()) {
680 return false;
681 }
682 } else {
683 return false;
684 }
685 } else {
686 if (mem->is_MergeMem()) {
687 if (m != mem->as_MergeMem()->memory_at(C->get_alias_index(u->adr_type()))) {
688 return false;
689 }
690 #ifdef ASSERT
691 mm->set_memory_at(C->get_alias_index(u->adr_type()), mem->as_MergeMem()->base_memory());
692 #endif
693 } else {
694 if (m != mem) {
695 return false;
696 }
697 }
698 }
699 }
700 }
701 return true;
702 }
703
// Find a SafePoint that dominates the back edge of loop 'x' and whose memory
// state matches the state at the backedge (i.e. no side effect happened in
// between), so its JVM state can be reused for predicates. Returns null if
// no suitable safepoint exists.
SafePointNode* PhaseIdealLoop::find_safepoint(Node* back_control, Node* x, IdealLoopTree* loop) {
  IfNode* exit_test = back_control->in(0)->as_If();
  SafePointNode* safepoint = nullptr;
  // Best case: the exit test hangs directly off a SafePoint that has no
  // other use — its state trivially matches the backedge.
  if (exit_test->in(0)->is_SafePoint() && exit_test->in(0)->outcnt() == 1) {
    safepoint = exit_test->in(0)->as_SafePoint();
  } else {
    // Otherwise walk the dominator chain from the backedge up to the loop
    // head looking for a SafePoint.
    Node* c = back_control;
    while (c != x && c->Opcode() != Op_SafePoint) {
      c = idom(c);
    }

    if (c->Opcode() == Op_SafePoint) {
      safepoint = c->as_SafePoint();
    }

    if (safepoint == nullptr) {
      return nullptr;
    }

    Node* mem = safepoint->in(TypeFunc::Memory);

    // We can only use that safepoint if there's no side effect between the backedge and the safepooint.

    // mm is the memory state at the safepoint (when it's a MergeMem)
    // no_side_effect_since_safepoint() goes over the memory state at the backedge. It resets the mm input for each
    // component of the memory state it encounters so it points to the base memory. Once no_side_effect_since_safepoint()
    // is done, if no side effect after the safepoint was found, mm should transform to the base memory: the states at
    // the backedge and safepoint are the same so all components of the memory state at the safepoint should have been
    // reset.
    MergeMemNode* mm = nullptr;
#ifdef ASSERT
    if (mem->is_MergeMem()) {
      mm = mem->clone()->as_MergeMem();
      _igvn._worklist.push(mm);
      for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) {
        // Loop invariant memory state won't be reset by no_side_effect_since_safepoint(). Do it here.
        // Escape Analysis can add state to mm that it doesn't add to the backedge memory Phis, breaking verification
        // code that relies on mm. Clear that extra state here.
        if (mms.alias_idx() != Compile::AliasIdxBot &&
            (loop != get_loop(ctrl_or_self(mms.memory())) ||
             (mms.adr_type()->isa_oop_ptr() && mms.adr_type()->is_known_instance()))) {
          mm->set_memory_at(mms.alias_idx(), mem->as_MergeMem()->base_memory());
        }
      }
    }
#endif
    if (!no_side_effect_since_safepoint(C, x, mem, mm, this)) {
      safepoint = nullptr;
    } else {
      assert(mm == nullptr|| _igvn.transform(mm) == mem->as_MergeMem()->base_memory(), "all memory state should have been processed");
    }
#ifdef ASSERT
    // The cloned MergeMem was only needed for the verification above.
    if (mm != nullptr) {
      _igvn.remove_dead_node(mm);
    }
#endif
  }
  return safepoint;
}
763
764 // If the loop has the shape of a counted loop but with a long
765 // induction variable, transform the loop in a loop nest: an inner
766 // loop that iterates for at most max int iterations with an integer
767 // induction variable and an outer loop that iterates over the full
768 // range of long values from the initial loop in (at most) max int
769 // steps. That is:
770 //
771 // x: for (long phi = init; phi < limit; phi += stride) {
772 // // phi := Phi(L, init, incr)
773 // // incr := AddL(phi, longcon(stride))
774 // long incr = phi + stride;
775 // ... use phi and incr ...
776 // }
777 //
778 // OR:
779 //
780 // x: for (long phi = init; (phi += stride) < limit; ) {
781 // // phi := Phi(L, AddL(init, stride), incr)
782 // // incr := AddL(phi, longcon(stride))
783 // long incr = phi + stride;
784 // ... use phi and (phi + stride) ...
785 // }
786 //
787 // ==transform=>
788 //
789 // const ulong inner_iters_limit = INT_MAX - stride - 1; //near 0x7FFFFFF0
790 // assert(stride <= inner_iters_limit); // else abort transform
791 // assert((extralong)limit + stride <= LONG_MAX); // else deopt
792 // outer_head: for (long outer_phi = init;;) {
793 // // outer_phi := Phi(outer_head, init, AddL(outer_phi, I2L(inner_phi)))
794 // ulong inner_iters_max = (ulong) MAX(0, ((extralong)limit + stride - outer_phi));
795 // long inner_iters_actual = MIN(inner_iters_limit, inner_iters_max);
796 // assert(inner_iters_actual == (int)inner_iters_actual);
797 // int inner_phi, inner_incr;
798 // x: for (inner_phi = 0;; inner_phi = inner_incr) {
799 // // inner_phi := Phi(x, intcon(0), inner_incr)
800 // // inner_incr := AddI(inner_phi, intcon(stride))
801 // inner_incr = inner_phi + stride;
802 // if (inner_incr < inner_iters_actual) {
803 // ... use phi=>(outer_phi+inner_phi) ...
804 // continue;
805 // }
806 // else break;
807 // }
808 // if ((outer_phi+inner_phi) < limit) //OR (outer_phi+inner_incr) < limit
809 // continue;
810 // else break;
811 // }
812 //
813 // The same logic is used to transform an int counted loop that contains long range checks into a loop nest of 2 int
814 // loops with long range checks transformed to int range checks in the inner loop.
// Transform a counted loop over a long iv (or an int counted loop that contains long range checks) into a nest of two
// loops: an outer loop advancing the long iv in chunks, and an inner int counted loop that the existing loop
// optimizations (range check elimination, unrolling, vectorization) can operate on. See the pseudo code in the comment
// above for the resulting shape. Returns true if either the loop nest or the short-running-loop variant was created.
bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {
  Node* x = loop->_head;
  // Only for inner loops
  if (loop->_child != nullptr || !x->is_BaseCountedLoop() || x->as_Loop()->is_loop_nest_outer_loop()) {
    return false;
  }

  if (x->is_CountedLoop() && !x->as_CountedLoop()->is_main_loop() && !x->as_CountedLoop()->is_normal_loop()) {
    return false;
  }

  BaseCountedLoopNode* head = x->as_BaseCountedLoop();
  BasicType bt = x->as_BaseCountedLoop()->bt();  // T_INT or T_LONG

  check_counted_loop_shape(loop, x, bt);

#ifndef PRODUCT
  if (bt == T_LONG) {
    AtomicAccess::inc(&_long_loop_candidates);
  }
#endif

  jlong stride_con_long = head->stride_con();
  assert(stride_con_long != 0, "missed some peephole opt");
  // We can't iterate for more than max int at a time.
  if (stride_con_long != (jint)stride_con_long || stride_con_long == min_jint) {
    assert(bt == T_LONG, "only for long loops");
    return false;
  }
  jint stride_con = checked_cast<jint>(stride_con_long);
  // The number of iterations for the integer count loop: guarantee no
  // overflow: max_jint - stride_con max. -1 so there's no need for a
  // loop limit check if the exit test is <= or >=.
  int iters_limit = max_jint - ABS(stride_con) - 1;
#ifdef ASSERT
  if (bt == T_LONG && StressLongCountedLoop > 0) {
    iters_limit = iters_limit / StressLongCountedLoop;
  }
#endif
  // At least 2 iterations so counted loop construction doesn't fail
  if (iters_limit/ABS(stride_con) < 2) {
    return false;
  }

  assert(iters_limit > 0, "can't be negative");

  PhiNode* phi = head->phi()->as_Phi();

  Node* back_control = head->in(LoopNode::LoopBackControl);

  // data nodes on back branch not supported
  if (back_control->outcnt() > 1) {
    return false;
  }

  Node* limit = head->limit();
  // We'll need to use the loop limit before the inner loop is entered
  if (!is_dominator(get_ctrl(limit), x)) {
    return false;
  }

  IfNode* exit_test = head->loopexit();

  assert(back_control->Opcode() == Op_IfTrue, "wrong projection for back edge");

  // Collect the long range checks that can be transformed into int range checks in the inner loop. This may lower
  // iters_limit to guarantee that scale * iv cannot overflow inside the inner loop.
  Node_List range_checks;
  iters_limit = extract_long_range_checks(loop, stride_con, iters_limit, phi, range_checks);

  if (bt == T_INT) {
    // The only purpose of creating a loop nest is to handle long range checks. If there are none, do not proceed further.
    if (range_checks.size() == 0) {
      return false;
    }
  }

  // Take what we know about the number of iterations of the long counted loop into account when computing the limit of
  // the inner loop.
  Node* init = head->init_trip();
  const TypeInteger* lo = _igvn.type(init)->is_integer(bt);
  const TypeInteger* hi = _igvn.type(limit)->is_integer(bt);
  if (stride_con < 0) {
    // For a downward-counting loop, init is the upper bound and limit the lower one.
    swap(lo, hi);
  }
  if (hi->hi_as_long() <= lo->lo_as_long()) {
    // not a loop after all
    return false;
  }

  if (range_checks.size() > 0) {
    // This transformation requires peeling one iteration. Also, if it has range checks and they are eliminated by Loop
    // Predication, then 2 Hoisted Check Predicates are added for one range check. Finally, transforming a long range
    // check requires extra logic to be executed before the loop is entered and for the outer loop. As a result, the
    // transformations can't pay off for a small number of iterations: roughly, if the loop runs for 3 iterations, it's
    // going to execute as many range checks once transformed with range checks eliminated (1 peeled iteration with
    // range checks + 2 predicates per range checks) as it would have not transformed. It also has to pay for the extra
    // logic on loop entry and for the outer loop.
    loop->compute_trip_count(this, bt);
    if (head->is_CountedLoop() && head->as_CountedLoop()->has_exact_trip_count()) {
      if (head->as_CountedLoop()->trip_count() <= 3) {
        return false;
      }
    } else {
      loop->compute_profile_trip_cnt(this);
      if (!head->is_profile_trip_failed() && head->profile_trip_cnt() <= 3) {
        return false;
      }
    }
  }

  // If the loop is known (statically or from profiling) to run for few enough iterations, avoid the outer loop
  // entirely and transform the range checks directly in a single loop.
  if (try_make_short_running_loop(loop, stride_con, range_checks, iters_limit)) {
    C->set_major_progress();
    return true;
  }

  // Cap the inner loop's iteration budget by the statically known maximum trip distance.
  julong orig_iters = (julong)hi->hi_as_long() - lo->lo_as_long();
  iters_limit = checked_cast<int>(MIN2((julong)iters_limit, orig_iters));

  // We need a safepoint to insert Parse Predicates for the inner loop.
  SafePointNode* safepoint;
  if (bt == T_INT && head->as_CountedLoop()->is_strip_mined()) {
    // Loop is strip mined: use the safepoint of the outer strip mined loop
    OuterStripMinedLoopNode* outer_loop = head->as_CountedLoop()->outer_loop();
    assert(outer_loop != nullptr, "no outer loop");
    safepoint = outer_loop->outer_safepoint();
    outer_loop->transform_to_counted_loop(&_igvn, this);
    exit_test = head->loopexit();
  } else {
    safepoint = find_safepoint(back_control, x, loop);
  }

  Node* exit_branch = exit_test->proj_out(false);
  Node* entry_control = head->in(LoopNode::EntryControl);

  // Clone the control flow of the loop to build an outer loop
  Node* outer_back_branch = back_control->clone();
  Node* outer_exit_test = new IfNode(exit_test->in(0), exit_test->in(1), exit_test->_prob, exit_test->_fcnt);
  Node* inner_exit_branch = exit_branch->clone();

  LoopNode* outer_head = new LoopNode(entry_control, outer_back_branch);
  IdealLoopTree* outer_ilt = insert_outer_loop(loop, outer_head, outer_back_branch);

  const bool body_populated = true;
  register_control(outer_head, outer_ilt, entry_control, body_populated);

  // The inner loop's exit now leads to the outer loop's exit test.
  _igvn.register_new_node_with_optimizer(inner_exit_branch);
  set_loop(inner_exit_branch, outer_ilt);
  set_idom(inner_exit_branch, exit_test, dom_depth(exit_branch));

  outer_exit_test->set_req(0, inner_exit_branch);
  register_control(outer_exit_test, outer_ilt, inner_exit_branch, body_populated);

  // The original exit branch now exits the outer loop instead of the inner one.
  _igvn.replace_input_of(exit_branch, 0, outer_exit_test);
  set_idom(exit_branch, outer_exit_test, dom_depth(exit_branch));

  outer_back_branch->set_req(0, outer_exit_test);
  register_control(outer_back_branch, outer_ilt, outer_exit_test, body_populated);

  // The inner loop is now entered through the new outer loop head.
  _igvn.replace_input_of(x, LoopNode::EntryControl, outer_head);
  set_idom(x, outer_head, dom_depth(x));

  // add an iv phi to the outer loop and use it to compute the inner
  // loop iteration limit
  Node* outer_phi = phi->clone();
  outer_phi->set_req(0, outer_head);
  register_new_node(outer_phi, outer_head);

  // inner_iters_max = MAX(0, limit - outer_phi) for a positive stride (mirrored for a negative one): the remaining
  // trip distance for this execution of the inner loop.
  Node* inner_iters_max = nullptr;
  if (stride_con > 0) {
    inner_iters_max = MaxNode::max_diff_with_zero(limit, outer_phi, TypeInteger::bottom(bt), _igvn);
  } else {
    inner_iters_max = MaxNode::max_diff_with_zero(outer_phi, limit, TypeInteger::bottom(bt), _igvn);
  }

  Node* inner_iters_limit = _igvn.integercon(iters_limit, bt);
  // inner_iters_max may not fit in a signed integer (iterating from
  // Long.MIN_VALUE to Long.MAX_VALUE for instance). Use an unsigned
  // min.
  const TypeInteger* inner_iters_actual_range = TypeInteger::make(0, iters_limit, Type::WidenMin, bt);
  Node* inner_iters_actual = MaxNode::unsigned_min(inner_iters_max, inner_iters_limit, inner_iters_actual_range, _igvn);

  Node* inner_iters_actual_int;
  if (bt == T_LONG) {
    inner_iters_actual_int = new ConvL2INode(inner_iters_actual);
    _igvn.register_new_node_with_optimizer(inner_iters_actual_int);
    // When the inner loop is transformed to a counted loop, a loop limit check is not expected to be needed because
    // the loop limit is less or equal to max_jint - stride - 1 (if stride is positive but a similar argument exists for
    // a negative stride). We add a CastII here to guarantee that, when the counted loop is created in a subsequent loop
    // opts pass, an accurate range of values for the limits is found.
    const TypeInt* inner_iters_actual_int_range = TypeInt::make(0, iters_limit, Type::WidenMin);
    inner_iters_actual_int = new CastIINode(outer_head, inner_iters_actual_int, inner_iters_actual_int_range, ConstraintCastNode::UnconditionalDependency);
    _igvn.register_new_node_with_optimizer(inner_iters_actual_int);
  } else {
    inner_iters_actual_int = inner_iters_actual;
  }

  Node* int_zero = intcon(0);
  if (stride_con < 0) {
    // The inner iv counts down from 0, so mirror the limit to the negative side.
    inner_iters_actual_int = new SubINode(int_zero, inner_iters_actual_int);
    _igvn.register_new_node_with_optimizer(inner_iters_actual_int);
  }

  // Clone the iv data nodes as an integer iv
  Node* int_stride = intcon(stride_con);
  Node* inner_phi = new PhiNode(x->in(0), TypeInt::INT);
  Node* inner_incr = new AddINode(inner_phi, int_stride);
  Node* inner_cmp = nullptr;
  inner_cmp = new CmpINode(inner_incr, inner_iters_actual_int);
  Node* inner_bol = new BoolNode(inner_cmp, exit_test->in(1)->as_Bool()->_test._test);
  inner_phi->set_req(LoopNode::EntryControl, int_zero);
  inner_phi->set_req(LoopNode::LoopBackControl, inner_incr);
  register_new_node(inner_phi, x);
  register_new_node(inner_incr, x);
  register_new_node(inner_cmp, x);
  register_new_node(inner_bol, x);

  // The loop exit test now tests the int iv against the per-chunk limit.
  _igvn.replace_input_of(exit_test, 1, inner_bol);

  // Clone inner loop phis to outer loop
  for (uint i = 0; i < head->outcnt(); i++) {
    Node* u = head->raw_out(i);
    if (u->is_Phi() && u != inner_phi && u != phi) {
      assert(u->in(0) == head, "inconsistent");
      Node* clone = u->clone();
      clone->set_req(0, outer_head);
      register_new_node(clone, outer_head);
      _igvn.replace_input_of(u, LoopNode::EntryControl, clone);
    }
  }

  // Replace inner loop long iv phi as inner loop int iv phi + outer
  // loop iv phi
  Node* iv_add = loop_nest_replace_iv(phi, inner_phi, outer_phi, head, bt);

  set_subtree_ctrl(inner_iters_actual_int, body_populated);

  LoopNode* inner_head = create_inner_head(loop, head, exit_test);

  // Summary of steps from initial loop to loop nest:
  //
  // == old IR nodes =>
  //
  // entry_control: {...}
  // x:
  // for (long phi = init;;) {
  //   // phi := Phi(x, init, incr)
  //   // incr := AddL(phi, longcon(stride))
  //   exit_test:
  //   if (phi < limit)
  //     back_control: fallthrough;
  //   else
  //     exit_branch: break;
  //   long incr = phi + stride;
  //   ... use phi and incr ...
  //   phi = incr;
  // }
  //
  // == new IR nodes (just before final peel) =>
  //
  // entry_control: {...}
  // long adjusted_limit = limit + stride; //because phi_incr != nullptr
  // assert(!limit_check_required || (extralong)limit + stride == adjusted_limit); // else deopt
  // ulong inner_iters_limit = max_jint - ABS(stride) - 1; //near 0x7FFFFFF0
  // outer_head:
  // for (long outer_phi = init;;) {
  //   // outer_phi := phi->clone(), in(0):=outer_head, => Phi(outer_head, init, incr)
  //   // REPLACE phi  => AddL(outer_phi, I2L(inner_phi))
  //   // REPLACE incr => AddL(outer_phi, I2L(inner_incr))
  //   // SO THAT outer_phi := Phi(outer_head, init, AddL(outer_phi, I2L(inner_incr)))
  //   ulong inner_iters_max = (ulong) MAX(0, ((extralong)adjusted_limit - outer_phi) * SGN(stride));
  //   int inner_iters_actual_int = (int) MIN(inner_iters_limit, inner_iters_max) * SGN(stride);
  //   inner_head: x: //in(1) := outer_head
  //   int inner_phi;
  //   for (inner_phi = 0;;) {
  //     // inner_phi := Phi(x, intcon(0), inner_phi + stride)
  //     int inner_incr = inner_phi + stride;
  //     bool inner_bol = (inner_incr < inner_iters_actual_int);
  //     exit_test: //exit_test->in(1) := inner_bol;
  //     if (inner_bol) // WAS (phi < limit)
  //       back_control: fallthrough;
  //     else
  //       inner_exit_branch: break;  //exit_branch->clone()
  //     ... use phi=>(outer_phi+inner_phi) ...
  //     inner_phi = inner_phi + stride;  // inner_incr
  //   }
  //   outer_exit_test:   //exit_test->clone(), in(0):=inner_exit_branch
  //   if ((outer_phi+inner_phi) < limit)  // WAS (phi < limit)
  //     outer_back_branch: fallthrough;  //back_control->clone(), in(0):=outer_exit_test
  //   else
  //     exit_branch: break;  //in(0) := outer_exit_test
  // }

  if (bt == T_INT) {
    // transform_long_range_checks() below expects a long outer iv.
    outer_phi = new ConvI2LNode(outer_phi);
    register_new_node(outer_phi, outer_head);
  }

  transform_long_range_checks(stride_con, range_checks, outer_phi, inner_iters_actual_int,
                              inner_phi, iv_add, inner_head);
  // Peel one iteration of the loop and use the safepoint at the end
  // of the peeled iteration to insert Parse Predicates. If no well
  // positioned safepoint peel to guarantee a safepoint in the outer
  // loop.
  if (safepoint != nullptr || !loop->_has_call) {
    old_new.clear();
    do_peeling(loop, old_new);
  } else {
    C->set_major_progress();
  }

  if (safepoint != nullptr) {
    // The safepoint was cloned by peeling: use the clone as anchor for the new Parse Predicates.
    SafePointNode* cloned_sfpt = old_new[safepoint->_idx]->as_SafePoint();

    if (ShortRunningLongLoop) {
      add_parse_predicate(Deoptimization::Reason_short_running_long_loop, inner_head, outer_ilt, cloned_sfpt);
    }
    if (UseLoopPredicate) {
      add_parse_predicate(Deoptimization::Reason_predicate, inner_head, outer_ilt, cloned_sfpt);
      if (UseProfiledLoopPredicate) {
        add_parse_predicate(Deoptimization::Reason_profile_predicate, inner_head, outer_ilt, cloned_sfpt);
      }
    }

    if (UseAutoVectorizationPredicate) {
      // We only want to use the auto-vectorization check as a trap once per bci. And
      // PhaseIdealLoop::add_parse_predicate only checks trap limits per method, so
      // we do a custom check here.
      if (!C->too_many_traps(cloned_sfpt->jvms()->method(), cloned_sfpt->jvms()->bci(), Deoptimization::Reason_auto_vectorization_check)) {
        add_parse_predicate(Deoptimization::Reason_auto_vectorization_check, inner_head, outer_ilt, cloned_sfpt);
      }
    }

    add_parse_predicate(Deoptimization::Reason_loop_limit_check, inner_head, outer_ilt, cloned_sfpt);
  }

#ifndef PRODUCT
  if (bt == T_LONG) {
    AtomicAccess::inc(&_long_loop_nests);
  }
#endif

  inner_head->mark_loop_nest_inner_loop();
  outer_head->mark_loop_nest_outer_loop();

  return true;
}
1160
1161 // Make a copy of Parse/Template Assertion predicates below existing predicates at the loop passed as argument
1162 class CloneShortLoopPredicateVisitor : public PredicateVisitor {
1163 ClonePredicateToTargetLoop _clone_predicate_to_loop;
1164 PhaseIdealLoop* const _phase;
1165
1166 public:
1167 CloneShortLoopPredicateVisitor(LoopNode* target_loop_head,
1168 const NodeInSingleLoopBody &node_in_loop_body,
1169 PhaseIdealLoop* phase)
1170 : _clone_predicate_to_loop(target_loop_head, node_in_loop_body, phase),
1171 _phase(phase) {
1172 }
1173 NONCOPYABLE(CloneShortLoopPredicateVisitor);
1174
1175 using PredicateVisitor::visit;
1176
1177 void visit(const ParsePredicate& parse_predicate) override {
1178 _clone_predicate_to_loop.clone_parse_predicate(parse_predicate, true);
1179 parse_predicate.kill(_phase->igvn());
1180 }
1181
1182 void visit(const TemplateAssertionPredicate& template_assertion_predicate) override {
1183 _clone_predicate_to_loop.clone_template_assertion_predicate(template_assertion_predicate);
1184 template_assertion_predicate.kill(_phase->igvn());
1185 }
1186 };
1187
1188 // If the loop is either statically known to run for a small enough number of iterations or if profile data indicates
1189 // that, we don't want an outer loop because the overhead of having an outer loop whose backedge is never taken, has a
1190 // measurable cost. Furthermore, creating the loop nest usually causes one iteration of the loop to be peeled so
1191 // predicates can be set up. If the loop is short running, then it's an extra iteration that's run with range checks
1192 // (compared to an int counted loop with int range checks).
1193 //
1194 // In the short running case, turn the loop into a regular loop again and transform the long range checks:
1195 // - LongCountedLoop: Create LoopNode but keep the loop limit type with a CastLL node to avoid that we later try to
1196 // create a Loop Limit Check when turning the LoopNode into a CountedLoopNode.
1197 // - CountedLoop: Can be reused.
// Try to avoid creating a loop nest: if the loop is known (statically or from profiling) to run for at most
// iters_limit / |stride| iterations, transform the long range checks directly in a single (int) loop. For the
// profile-driven case, a Short Running Long Loop Predicate is added that deoptimizes if the actual trip distance
// exceeds iters_limit. Returns true if the transformation was applied.
bool PhaseIdealLoop::try_make_short_running_loop(IdealLoopTree* loop, jint stride_con, const Node_List &range_checks,
                                                 const uint iters_limit) {
  if (!ShortRunningLongLoop) {
    return false;
  }
  BaseCountedLoopNode* head = loop->_head->as_BaseCountedLoop();
  BasicType bt = head->bt();
  Node* entry_control = head->skip_strip_mined()->in(LoopNode::EntryControl);

  loop->compute_trip_count(this, bt);
  // Loop must run for no more than iter_limits as it guarantees no overflow of scale * iv in long range checks (see
  // comment above PhaseIdealLoop::transform_long_range_checks()).
  // iters_limit / ABS(stride_con) is the largest trip count for which we know it's correct to not create a loop nest:
  // it's always beneficial to have a single loop rather than a loop nest, so we try to apply this transformation as
  // often as possible.
  bool known_short_running_loop = head->trip_count() <= iters_limit / ABS(stride_con);
  bool profile_short_running_loop = false;
  if (!known_short_running_loop) {
    loop->compute_profile_trip_cnt(this);
    if (StressShortRunningLongLoop) {
      // Stress mode: speculate regardless of the profile.
      profile_short_running_loop = true;
    } else {
      profile_short_running_loop = !head->is_profile_trip_failed() && head->profile_trip_cnt() <= iters_limit / ABS(stride_con);
    }
  }

  if (!known_short_running_loop && !profile_short_running_loop) {
    return false;
  }

  Node* limit = head->limit();
  Node* init = head->init_trip();

  // Trip distance of the loop: limit - init for a positive stride, init - limit for a negative one.
  Node* new_limit;
  if (stride_con > 0) {
    new_limit = SubNode::make(limit, init, bt);
  } else {
    new_limit = SubNode::make(init, limit, bt);
  }
  register_new_node(new_limit, entry_control);

  PhiNode* phi = head->phi()->as_Phi();
  if (profile_short_running_loop) {
    // Add a Short Running Long Loop Predicate. It's the first predicate in the predicate chain before entering a loop
    // because a cast that's control dependent on the Short Running Long Loop Predicate is added to narrow the limit and
    // future predicates may be dependent on the new limit (so have to be between the loop and Short Running Long Loop
    // Predicate). The current limit could, itself, be dependent on an existing predicate. Clone parse and template
    // assertion predicates below existing predicates to get proper ordering of predicates when walking from the loop
    // up: future predicates, Short Running Long Loop Predicate, existing predicates.
    //
    //        Existing Hoisted
    //       Check Predicates
    //              |
    //    New Short Running Long
    //        Loop Predicate
    //              |
    //  Cloned Parse Predicates and
    // Template Assertion Predicates
    // (future predicates added here)
    //              |
    //            Loop
    const Predicates predicates_before_cloning(entry_control);
    const PredicateBlock* short_running_long_loop_predicate_block = predicates_before_cloning.short_running_long_loop_predicate_block();
    if (!short_running_long_loop_predicate_block->has_parse_predicate()) { // already trapped
      return false;
    }
    PredicateIterator predicate_iterator(entry_control);
    NodeInSingleLoopBody node_in_short_loop_body(this, loop);
    CloneShortLoopPredicateVisitor clone_short_loop_predicates_visitor(head, node_in_short_loop_body, this);
    predicate_iterator.for_each(clone_short_loop_predicates_visitor);

    // Cloning moved predicates below the loop entry: re-fetch the entry control.
    entry_control = head->skip_strip_mined()->in(LoopNode::EntryControl);

    const Predicates predicates_after_cloning(entry_control);

    ParsePredicateSuccessProj* short_running_loop_predicate_proj = predicates_after_cloning.
        short_running_long_loop_predicate_block()->
        parse_predicate_success_proj();
    assert(short_running_loop_predicate_proj->in(0)->is_ParsePredicate(), "must be parse predicate");

    // Guard: trip distance <= iters_limit, otherwise deoptimize and recompile without this speculation.
    const jlong iters_limit_long = iters_limit;
    Node* cmp_limit = CmpNode::make(new_limit, _igvn.integercon(iters_limit_long, bt), bt);
    Node* bol = new BoolNode(cmp_limit, BoolTest::le);
    Node* new_predicate_proj = create_new_if_for_predicate(short_running_loop_predicate_proj,
                                                           nullptr,
                                                           Deoptimization::Reason_short_running_long_loop,
                                                           Op_If);
    Node* iff = new_predicate_proj->in(0);
    _igvn.replace_input_of(iff, 1, bol);
    register_new_node(cmp_limit, iff->in(0));
    register_new_node(bol, iff->in(0));
    // Narrow the limit's type: past the predicate we know 1 <= new_limit <= iters_limit.
    new_limit = ConstraintCastNode::make_cast_for_basic_type(new_predicate_proj, new_limit,
                                                             TypeInteger::make(1, iters_limit_long, Type::WidenMin, bt),
                                                             ConstraintCastNode::UnconditionalDependency, bt);
    register_new_node(new_limit, new_predicate_proj);

#ifndef PRODUCT
    if (TraceLoopLimitCheck) {
      tty->print_cr("Short Long Loop Check Predicate generated:");
      DEBUG_ONLY(bol->dump(2);)
    }
#endif
    entry_control = head->skip_strip_mined()->in(LoopNode::EntryControl);
  } else if (bt == T_LONG) {
    // We're turning a long counted loop into a regular loop that will be converted into an int counted loop. That loop
    // won't need loop limit check predicates (iters_limit guarantees that). Add a cast to make sure that, whatever
    // transformation happens by the time the counted loop is created (in a subsequent pass of loop opts), C2 knows
    // enough about the loop's limit that it doesn't try to add loop limit check predicates.
    const Predicates predicates(entry_control);
    const TypeLong* new_limit_t = new_limit->Value(&_igvn)->is_long();
    new_limit = ConstraintCastNode::make_cast_for_basic_type(predicates.entry(), new_limit,
                                                             TypeLong::make(0, new_limit_t->_hi, new_limit_t->_widen),
                                                             ConstraintCastNode::UnconditionalDependency, bt);
    register_new_node(new_limit, predicates.entry());
  } else {
    assert(bt == T_INT && known_short_running_loop, "only CountedLoop statically known to be short running");
  }
  IfNode* exit_test = head->loopexit();

  if (bt == T_LONG) {
    // The loop is short running so new_limit fits into an int: either we determined that statically or added a guard
    new_limit = new ConvL2INode(new_limit);
    register_new_node(new_limit, entry_control);
  }

  Node* int_zero = intcon(0);
  if (stride_con < 0) {
    // The new int iv counts down from 0, so mirror the limit to the negative side.
    new_limit = new SubINode(int_zero, new_limit);
    register_new_node(new_limit, entry_control);
  }

  // Clone the iv data nodes as an integer iv
  Node* int_stride = intcon(stride_con);
  Node* inner_phi = new PhiNode(head, TypeInt::INT);
  Node* inner_incr = new AddINode(inner_phi, int_stride);
  Node* inner_cmp = new CmpINode(inner_incr, new_limit);
  Node* inner_bol = new BoolNode(inner_cmp, exit_test->in(1)->as_Bool()->_test._test);
  inner_phi->set_req(LoopNode::EntryControl, int_zero);
  inner_phi->set_req(LoopNode::LoopBackControl, inner_incr);
  register_new_node(inner_phi, head);
  register_new_node(inner_incr, head);
  register_new_node(inner_cmp, head);
  register_new_node(inner_bol, head);

  // The loop exit test now tests the int iv against the narrowed limit.
  _igvn.replace_input_of(exit_test, 1, inner_bol);

  // Replace inner loop long iv phi as inner loop int iv phi + outer
  // loop iv phi
  Node* iv_add = loop_nest_replace_iv(phi, inner_phi, init, head, bt);

  LoopNode* inner_head = head;
  if (bt == T_LONG) {
    // Turn the loop back to a counted loop
    inner_head = create_inner_head(loop, head, exit_test);
  } else {
    // Use existing counted loop
    revert_to_normal_loop(head);
  }

  if (bt == T_INT) {
    // transform_long_range_checks() below expects a long base iv value.
    init = new ConvI2LNode(init);
    register_new_node(init, entry_control);
  }

  transform_long_range_checks(stride_con, range_checks, init, new_limit,
                              inner_phi, iv_add, inner_head);

  inner_head->mark_loop_nest_inner_loop();

  return true;
}
1369
1370 int PhaseIdealLoop::extract_long_range_checks(const IdealLoopTree* loop, jint stride_con, int iters_limit, PhiNode* phi,
1371 Node_List& range_checks) {
1372 const jlong min_iters = 2;
1373 jlong reduced_iters_limit = iters_limit;
1374 jlong original_iters_limit = iters_limit;
1375 for (uint i = 0; i < loop->_body.size(); i++) {
1376 Node* c = loop->_body.at(i);
1377 if (c->is_IfProj() && c->in(0)->is_RangeCheck()) {
1378 IfProjNode* if_proj = c->as_IfProj();
1379 CallStaticJavaNode* call = if_proj->is_uncommon_trap_if_pattern();
1380 if (call != nullptr) {
1381 Node* range = nullptr;
1382 Node* offset = nullptr;
1383 jlong scale = 0;
1384 if (loop->is_range_check_if(if_proj, this, T_LONG, phi, range, offset, scale) &&
1385 loop->is_invariant(range) && loop->is_invariant(offset) &&
1386 scale != min_jlong &&
1387 original_iters_limit / ABS(scale) >= min_iters * ABS(stride_con)) {
1388 assert(scale == (jint)scale, "scale should be an int");
1389 reduced_iters_limit = MIN2(reduced_iters_limit, original_iters_limit/ABS(scale));
1390 range_checks.push(c);
1391 }
1392 }
1393 }
1394 }
1395
1396 return checked_cast<int>(reduced_iters_limit);
1397 }
1398
1399 // One execution of the inner loop covers a sub-range of the entire iteration range of the loop: [A,Z), aka [A=init,
1400 // Z=limit). If the loop has at least one trip (which is the case here), the iteration variable i always takes A as its
1401 // first value, followed by A+S (S is the stride), next A+2S, etc. The limit is exclusive, so that the final value B of
1402 // i is never Z. It will be B=Z-1 if S=1, or B=Z+1 if S=-1.
1403
1404 // If |S|>1 the formula for the last value B would require a floor operation, specifically B=floor((Z-sgn(S)-A)/S)*S+A,
1405 // which is B=Z-sgn(S)U for some U in [1,|S|]. So when S>0, i ranges as i:[A,Z) or i:[A,B=Z-U], or else (in reverse)
1406 // as i:(Z,A] or i:[B=Z+U,A]. It will become important to reason about this inclusive range [A,B] or [B,A].
1407
1408 // Within the loop there may be many range checks. Each such range check (R.C.) is of the form 0 <= i*K+L < R, where K
1409 // is a scale factor applied to the loop iteration variable i, and L is some offset; K, L, and R are loop-invariant.
1410 // Because R is never negative (see below), this check can always be simplified to an unsigned check i*K+L <u R.
1411
1412 // When a long loop over a 64-bit variable i (outer_iv) is decomposed into a series of shorter sub-loops over a 32-bit
1413 // variable j (inner_iv), j ranges over a shorter interval j:[0,B_2] or [0,Z_2) (assuming S > 0), where the limit is
1414 // chosen to prevent various cases of 32-bit overflow (including multiplications j*K below). In the sub-loop the
1415 // logical value i is offset from j by a 64-bit constant C, so i ranges in i:C+[0,Z_2).
1416
1417 // For S<0, j ranges (in reverse!) through j:[-|B_2|,0] or (-|Z_2|,0]. For either sign of S, we can say i=j+C and j
1418 // ranges through 32-bit ranges [A_2,B_2] or [B_2,A_2] (A_2=0 of course).
1419
1420 // The disjoint union of all the C+[A_2,B_2] ranges from the sub-loops must be identical to the whole range [A,B].
1421 // Assuming S>0, the first C must be A itself, and the next C value is the previous C+B_2, plus S. If |S|=1, the next
1422 // C value is also the previous C+Z_2. In each sub-loop, j counts from j=A_2=0 and i counts from C+0 and exits at
1423 // j=B_2 (i=C+B_2), just before it gets to i=C+Z_2. Both i and j count up (from C and 0) if S>0; otherwise they count
1424 // down (from C and 0 again).
1425
1426 // Returning to range checks, we see that each i*K+L <u R expands to (C+j)*K+L <u R, or j*K+Q <u R, where Q=(C*K+L).
1427 // (Recall that K and L and R are loop-invariant scale, offset and range values for a particular R.C.) This is still a
1428 // 64-bit comparison, so the range check elimination logic will not apply to it. (The R.C.E. transforms operate only on
1429 // 32-bit indexes and comparisons, because they use 64-bit temporary values to avoid overflow; see
1430 // PhaseIdealLoop::add_constraint.)
1431
1432 // We must transform this comparison so that it gets the same answer, but by means of a 32-bit R.C. (using j not i) of
1433 // the form j*K+L_2 <u32 R_2. Note that L_2 and R_2 must be loop-invariant, but only with respect to the sub-loop. Thus, the
1434 // problem reduces to computing values for L_2 and R_2 (for each R.C. in the loop) in the loop header for the sub-loop.
1435 // Then the standard R.C.E. transforms can take those as inputs and further compute the necessary minimum and maximum
1436 // values for the 32-bit counter j within which the range checks can be eliminated.
1437
1438 // So, given j*K+Q <u R, we need to find some j*K+L_2 <u32 R_2, where L_2 and R_2 fit in 32 bits, and the 32-bit operations do
1439 // not overflow. We also need to cover the cases where i*K+L (= j*K+Q) overflows to a 64-bit negative, since that is
1440 // allowed as an input to the R.C., as long as the R.C. as a whole fails.
1441
1442 // If 32-bit multiplication j*K might overflow, we adjust the sub-loop limit Z_2 closer to zero to reduce j's range.
1443
1444 // For each R.C. j*K+Q <u32 R, the range of mathematical values of j*K+Q in the sub-loop is [Q_min, Q_max], where
1445 // Q_min=Q and Q_max=B_2*K+Q (if S>0 and K>0), Q_min=A_2*K+Q and Q_max=Q (if S<0 and K>0),
1446 // Q_min=B_2*K+Q and Q_max=Q if (S>0 and K<0), Q_min=Q and Q_max=A_2*K+Q (if S<0 and K<0)
1447
1448 // Note that the first R.C. value is always Q=(S*K>0 ? Q_min : Q_max). Also Q_{min,max} = Q + {min,max}(A_2*K,B_2*K).
1449 // If S*K>0 then, as the loop iterations progress, each R.C. value i*K+L = j*K+Q goes up from Q=Q_min towards Q_max.
1450 // If S*K<0 then j*K+Q starts at Q=Q_max and goes down towards Q_min.
1451
1452 // Case A: Some Negatives (but no overflow).
1453 // Number line:
1454 // |s64_min . . . 0 . . . s64_max|
1455 // | . Q_min..Q_max . 0 . . . . | s64 negative
1456 // | . . . . R=0 R< R< R< R< | (against R values)
1457 // | . . . Q_min..0..Q_max . . . | small mixed
1458 // | . . . . R R R< R< R< | (against R values)
1459 //
1460 // R values which are out of range (>Q_max+1) are reduced to max(0,Q_max+1). They are marked on the number line as R<.
1461 //
1462 // So, if Q_min <s64 0, then use this test:
1463 // j*K + s32_trunc(Q_min) <u32 clamp(R, 0, Q_max+1) if S*K>0 (R.C.E. steps upward)
1464 // j*K + s32_trunc(Q_max) <u32 clamp(R, 0, Q_max+1) if S*K<0 (R.C.E. steps downward)
1465 // Both formulas reduce to adding j*K to the 32-bit truncated value of the first R.C. expression value, Q:
1466 // j*K + s32_trunc(Q) <u32 clamp(R, 0, Q_max+1) for all S,K
1467
1468 // If the 32-bit truncation loses information, no harm is done, since certainly the clamp also will return R_2=zero.
1469
1470 // Case B: No Negatives.
1471 // Number line:
1472 // |s64_min . . . 0 . . . s64_max|
1473 // | . . . . 0 Q_min..Q_max . . | small positive
1474 // | . . . . R> R R R< R< | (against R values)
1475 // | . . . . 0 . Q_min..Q_max . | s64 positive
1476 // | . . . . R> R> R R R< | (against R values)
1477 //
1478 // R values which are out of range (<Q_min or >Q_max+1) are reduced as marked: R> up to Q_min, R< down to Q_max+1.
1479 // Then the whole comparison is shifted left by Q_min, so it can take place at zero, which is a nice 32-bit value.
1480 //
1481 // So, if both Q_min, Q_max+1 >=s64 0, then use this test:
1482 // j*K + 0 <u32 clamp(R, Q_min, Q_max+1) - Q_min if S*K>0
1483 // More generally:
1484 // j*K + Q - Q_min <u32 clamp(R, Q_min, Q_max+1) - Q_min for all S,K
1485
1486 // Case C: Overflow in the 64-bit domain
1487 // Number line:
1488 // |..Q_max-2^64 . . 0 . . . Q_min..| s64 overflow
1489 // | . . . . R> R> R> R> R | (against R values)
1490 //
1491 // In this case, Q_min >s64 Q_max+1, even though the mathematical values of Q_min and Q_max+1 are correctly ordered.
1492 // The formulas from the previous case can be used, except that the bad upper bound Q_max is replaced by max_jlong.
1493 // (In fact, we could use any replacement bound from R to max_jlong inclusive, as the input to the clamp function.)
1494 //
1495 // So if Q_min >=s64 0 but Q_max+1 <s64 0, use this test:
1496 // j*K + 0 <u32 clamp(R, Q_min, max_jlong) - Q_min if S*K>0
1497 // More generally:
1498 // j*K + Q - Q_min <u32 clamp(R, Q_min, max_jlong) - Q_min for all S,K
1499 //
1500 // Dropping the bad bound means only Q_min is used to reduce the range of R:
1501 // j*K + Q - Q_min <u32 max(Q_min, R) - Q_min for all S,K
1502 //
1503 // Here the clamp function is a 64-bit min/max that reduces the dynamic range of its R operand to the required [L,H]:
1504 // clamp(X, L, H) := max(L, min(X, H))
1505 // When degenerately L > H, it returns L not H.
1506 //
1507 // All of the formulas above can be merged into a single one:
1508 // L_clamp = Q_min < 0 ? 0 : Q_min --whether and how far to left-shift
1509 // H_clamp = Q_max+1 < Q_min ? max_jlong : Q_max+1
1510 // = Q_max+1 < 0 && Q_min >= 0 ? max_jlong : Q_max+1
1511 // Q_first = Q = (S*K>0 ? Q_min : Q_max) = (C*K+L)
1512 // R_clamp = clamp(R, L_clamp, H_clamp) --reduced dynamic range
1513 // replacement R.C.:
1514 // j*K + Q_first - L_clamp <u32 R_clamp - L_clamp
1515 // or equivalently:
1516 // j*K + L_2 <u32 R_2
1517 // where
1518 // L_2 = Q_first - L_clamp
1519 // R_2 = R_clamp - L_clamp
1520 //
1521 // Note on why R is never negative:
1522 //
1523 // Various details of this transformation would break badly if R could be negative, so this transformation only
1524 // operates after obtaining hard evidence that R<0 is impossible. For example, if R comes from a LoadRange node, we
1525 // know R cannot be negative. For explicit checks (of both int and long) a proof is constructed in
1526 // inline_preconditions_checkIndex, which triggers an uncommon trap if R<0, then wraps R in a ConstraintCastNode with a
1527 // non-negative type. Later on, when IdealLoopTree::is_range_check_if looks for an optimizable R.C., it checks that
1528 // the type of that R node is non-negative. Any "wild" R node that could be negative is not treated as an optimizable
1529 // R.C., but R values from a.length and inside checkIndex are good to go.
1530 //
// Rewrite each 64-bit range check in "range_checks" into an equivalent 32-bit
// check on the inner (int) loop's induction variable. The original long iv i
// was split as i = C + j with C = outer_phi (long) and j = inner_phi (int).
// Following the derivation in the comment block above, each check
//   i*K + L <u64 R
// becomes
//   j*K + L_2 <u32 R_2
// where L_2 and R_2 are loop-invariant and computed once per range check on
// the inner loop's entry path.
//
// stride_con:             stride of the loop that was split
// range_checks:           RangeCheck projections to transform
// outer_phi:              iv phi of the outer (long) loop (C above)
// inner_iters_actual_int: iteration count of the inner (int) loop
// inner_phi:              iv phi of the inner (int) loop (j above)
// iv_add:                 increment of the original long iv
// inner_head:             inner loop head; its entry control anchors the new
//                         loop-invariant nodes
void PhaseIdealLoop::transform_long_range_checks(int stride_con, const Node_List &range_checks, Node* outer_phi,
                                                 Node* inner_iters_actual_int, Node* inner_phi,
                                                 Node* iv_add, LoopNode* inner_head) {
  Node* long_zero = longcon(0);
  Node* int_zero = intcon(0);
  Node* long_one = longcon(1);
  // The inner loop iterates over ints, so its stride is materialized as an int constant.
  Node* int_stride = intcon(checked_cast<int>(stride_con));

  for (uint i = 0; i < range_checks.size(); i++) {
    ProjNode* proj = range_checks.at(i)->as_Proj();
    RangeCheckNode* rc = proj->in(0)->as_RangeCheck();
    jlong scale = 0;
    Node* offset = nullptr;
    Node* rc_bol = rc->in(1);
    Node* rc_cmp = rc_bol->in(1);
    if (rc_cmp->Opcode() == Op_CmpU) {
      // could be shared and have already been taken care of
      continue;
    }
    bool short_scale = false;
    bool ok = is_scaled_iv_plus_offset(rc_cmp->in(1), iv_add, T_LONG, &scale, &offset, &short_scale);
    assert(ok, "inconsistent: was tested before");
    Node* range = rc_cmp->in(2);
    Node* c = rc->in(0);
    Node* entry_control = inner_head->in(LoopNode::EntryControl);

    // Names follow the derivation above: R is the range, K the scale, L the offset.
    Node* R = range;
    Node* K = longcon(scale);

    Node* L = offset;

    if (short_scale) {
      // This converts:
      // (int)i*K + L <u64 R
      // with K an int into:
      // i*(long)K + L <u64 unsigned_min((long)max_jint + L + 1, R)
      // to protect against an overflow of (int)i*K
      //
      // Because if (int)i*K overflows, there are K,L where:
      // (int)i*K + L <u64 R is false because (int)i*K+L overflows to a negative which becomes a huge u64 value.
      // But if i*(long)K + L is >u64 (long)max_jint and still is <u64 R, then
      // i*(long)K + L <u64 R is true.
      //
      // As a consequence simply converting i*K + L <u64 R to i*(long)K + L <u64 R could cause incorrect execution.
      //
      // It's always true that:
      // (int)i*K <u64 (long)max_jint + 1
      // which implies (int)i*K + L <u64 (long)max_jint + 1 + L
      // As a consequence:
      // i*(long)K + L <u64 unsigned_min((long)max_jint + L + 1, R)
      // is always false in case of overflow of i*K
      //
      // Note, there are also K,L where i*K overflows and
      // i*K + L <u64 R is true, but
      // i*(long)K + L <u64 unsigned_min((long)max_jint + L + 1, R) is false
      // So this transformation could cause spurious deoptimizations and failed range check elimination
      // (but not incorrect execution) for unlikely corner cases with overflow.
      // If this causes problems in practice, we could maybe direct execution to a post-loop, instead of deoptimizing.
      Node* max_jint_plus_one_long = longcon((jlong)max_jint + 1);
      Node* max_range = new AddLNode(max_jint_plus_one_long, L);
      register_new_node(max_range, entry_control);
      R = MaxNode::unsigned_min(R, max_range, TypeLong::POS, _igvn);
      set_subtree_ctrl(R, true);
    }

    Node* C = outer_phi;

    // Start with 64-bit values:
    // i*K + L <u64 R
    // (C+j)*K + L <u64 R
    // j*K + Q <u64 R where Q = Q_first = C*K+L
    Node* Q_first = new MulLNode(C, K);
    register_new_node(Q_first, entry_control);
    Q_first = new AddLNode(Q_first, L);
    register_new_node(Q_first, entry_control);

    // Compute endpoints of the range of values j*K + Q.
    // Q_min = (j=0)*K + Q; Q_max = (j=B_2)*K + Q
    Node* Q_min = Q_first;

    // Compute the exact ending value B_2 (which is really A_2 if S < 0)
    // Note: this->C is the Compile* member; the local 'C' above shadows it.
    Node* B_2 = new LoopLimitNode(this->C, int_zero, inner_iters_actual_int, int_stride);
    register_new_node(B_2, entry_control);
    B_2 = new SubINode(B_2, int_stride);
    register_new_node(B_2, entry_control);
    B_2 = new ConvI2LNode(B_2);
    register_new_node(B_2, entry_control);

    Node* Q_max = new MulLNode(B_2, K);
    register_new_node(Q_max, entry_control);
    Q_max = new AddLNode(Q_max, Q_first);
    register_new_node(Q_max, entry_control);

    // If the iv steps downward (S*K < 0), the first value is the largest.
    if (scale * stride_con < 0) {
      swap(Q_min, Q_max);
    }
    // Now, mathematically, Q_max > Q_min, and they are close enough so that (Q_max-Q_min) fits in 32 bits.

    // L_clamp = Q_min < 0 ? 0 : Q_min
    Node* Q_min_cmp = new CmpLNode(Q_min, long_zero);
    register_new_node(Q_min_cmp, entry_control);
    Node* Q_min_bool = new BoolNode(Q_min_cmp, BoolTest::lt);
    register_new_node(Q_min_bool, entry_control);
    Node* L_clamp = new CMoveLNode(Q_min_bool, Q_min, long_zero, TypeLong::LONG);
    register_new_node(L_clamp, entry_control);
    // (This could also be coded bitwise as L_clamp = Q_min & ~(Q_min>>63).)

    Node* Q_max_plus_one = new AddLNode(Q_max, long_one);
    register_new_node(Q_max_plus_one, entry_control);

    // H_clamp = Q_max+1 < Q_min ? max_jlong : Q_max+1
    // (Because Q_min and Q_max are close, the overflow check could also be encoded as Q_max+1 < 0 & Q_min >= 0.)
    Node* max_jlong_long = longcon(max_jlong);
    Node* Q_max_cmp = new CmpLNode(Q_max_plus_one, Q_min);
    register_new_node(Q_max_cmp, entry_control);
    Node* Q_max_bool = new BoolNode(Q_max_cmp, BoolTest::lt);
    register_new_node(Q_max_bool, entry_control);
    Node* H_clamp = new CMoveLNode(Q_max_bool, Q_max_plus_one, max_jlong_long, TypeLong::LONG);
    register_new_node(H_clamp, entry_control);
    // (This could also be coded bitwise as H_clamp = ((Q_max+1)<<1 | M)>>>1 where M = (Q_max+1)>>63 & ~Q_min>>63.)

    // R_2 = clamp(R, L_clamp, H_clamp) - L_clamp
    // that is: R_2 = clamp(R, L_clamp=0, H_clamp=Q_max) if Q_min < 0
    // or else: R_2 = clamp(R, L_clamp, H_clamp) - Q_min if Q_min >= 0
    // and also: R_2 = clamp(R, L_clamp, Q_max+1) - L_clamp if Q_min < Q_max+1 (no overflow)
    // or else: R_2 = clamp(R, L_clamp, *no limit*)- L_clamp if Q_max+1 < Q_min (overflow)
    Node* R_2 = clamp(R, L_clamp, H_clamp);
    R_2 = new SubLNode(R_2, L_clamp);
    register_new_node(R_2, entry_control);
    // By the derivation above the clamped/shifted range is a non-negative int value (TypeInt::POS).
    R_2 = new ConvL2INode(R_2, TypeInt::POS);
    register_new_node(R_2, entry_control);

    // L_2 = Q_first - L_clamp
    // We are subtracting L_clamp from both sides of the <u32 comparison.
    // If S*K>0, then Q_first == 0 and the R.C. expression starts at -L_clamp and steps upward to Q_max-L_clamp.
    // If S*K<0, then Q_first != 0 and the R.C. expression starts high and steps downward to Q_min-L_clamp.
    Node* L_2 = new SubLNode(Q_first, L_clamp);
    register_new_node(L_2, entry_control);
    L_2 = new ConvL2INode(L_2, TypeInt::INT);
    register_new_node(L_2, entry_control);

    // Transform the range check using the computed values L_2/R_2
    // from: i*K + L <u64 R
    // to: j*K + L_2 <u32 R_2
    // that is:
    // (j*K + Q_first) - L_clamp <u32 clamp(R, L_clamp, H_clamp) - L_clamp
    // K is reused here as the int constant for the 32-bit multiply.
    K = intcon(checked_cast<int>(scale));
    Node* scaled_iv = new MulINode(inner_phi, K);
    register_new_node(scaled_iv, c);
    Node* scaled_iv_plus_offset = new AddINode(scaled_iv, L_2);
    register_new_node(scaled_iv_plus_offset, c);

    Node* new_rc_cmp = new CmpUNode(scaled_iv_plus_offset, R_2);
    register_new_node(new_rc_cmp, c);

    // Swing the Bool of the original RangeCheck over to the new unsigned compare.
    _igvn.replace_input_of(rc_bol, 1, new_rc_cmp);
  }
}
1689
1690 Node* PhaseIdealLoop::clamp(Node* R, Node* L, Node* H) {
1691 Node* min = MaxNode::signed_min(R, H, TypeLong::LONG, _igvn);
1692 set_subtree_ctrl(min, true);
1693 Node* max = MaxNode::signed_max(L, min, TypeLong::LONG, _igvn);
1694 set_subtree_ctrl(max, true);
1695 return max;
1696 }
1697
// Replace the BaseCountedLoopNode "head" with a plain LoopNode and the exit
// test "exit_test" with a fresh IfNode, keeping the same inputs, and update
// the loop body, loop tree and dominator bookkeeping accordingly.
// Returns the new loop head.
LoopNode* PhaseIdealLoop::create_inner_head(IdealLoopTree* loop, BaseCountedLoopNode* head,
                                            IfNode* exit_test) {
  // New nodes copy the control/data inputs (and, for the If, the profile data)
  // of the nodes they replace.
  LoopNode* new_inner_head = new LoopNode(head->in(1), head->in(2));
  IfNode* new_inner_exit = new IfNode(exit_test->in(0), exit_test->in(1), exit_test->_prob, exit_test->_fcnt);
  _igvn.register_new_node_with_optimizer(new_inner_head);
  _igvn.register_new_node_with_optimizer(new_inner_exit);
  // Swap membership in the loop body: new nodes in, old nodes out.
  loop->_body.push(new_inner_head);
  loop->_body.push(new_inner_exit);
  loop->_body.yank(head);
  loop->_body.yank(exit_test);
  set_loop(new_inner_head, loop);
  set_loop(new_inner_exit, loop);
  // The replacements inherit the dominator info of the originals.
  set_idom(new_inner_head, idom(head), dom_depth(head));
  set_idom(new_inner_exit, idom(exit_test), dom_depth(exit_test));
  // Lazily redirect all uses of the old nodes to the new ones.
  lazy_replace(head, new_inner_head);
  lazy_replace(exit_test, new_inner_exit);
  loop->_head = new_inner_head;
  return new_inner_head;
}
1717
1718 #ifdef ASSERT
1719 void PhaseIdealLoop::check_counted_loop_shape(IdealLoopTree* loop, Node* x, BasicType bt) {
1720 Node* back_control = loop_exit_control(x, loop);
1721 assert(back_control != nullptr, "no back control");
1722
1723 BoolTest::mask mask = BoolTest::illegal;
1724 float cl_prob = 0;
1725 Node* incr = nullptr;
1726 Node* limit = nullptr;
1727
1728 Node* cmp = loop_exit_test(back_control, loop, incr, limit, mask, cl_prob);
1729 assert(cmp != nullptr && cmp->Opcode() == Op_Cmp(bt), "no exit test");
1730
1731 Node* phi_incr = nullptr;
1732 incr = loop_iv_incr(incr, x, loop, phi_incr);
1733 assert(incr != nullptr && incr->Opcode() == Op_Add(bt), "no incr");
1734
1735 Node* xphi = nullptr;
1736 Node* stride = loop_iv_stride(incr, xphi);
1737
1738 assert(stride != nullptr, "no stride");
1739
1740 PhiNode* phi = loop_iv_phi(xphi, phi_incr, x);
1741
1742 assert(phi != nullptr && phi->in(LoopNode::LoopBackControl) == incr, "No phi");
1743
1744 jlong stride_con = stride->get_integer_as_long(bt);
1745
1746 assert(condition_stride_ok(mask, stride_con), "illegal condition");
1747
1748 assert(mask != BoolTest::ne, "unexpected condition");
1749 assert(phi_incr == nullptr, "bad loop shape");
1750 assert(cmp->in(1) == incr, "bad exit test shape");
1751
1752 // Safepoint on backedge not supported
1753 assert(x->in(LoopNode::LoopBackControl)->Opcode() != Op_SafePoint, "no safepoint on backedge");
1754 }
1755 #endif
1756
1757 #ifdef ASSERT
1758 // convert an int counted loop to a long counted to stress handling of
1759 // long counted loops
bool PhaseIdealLoop::convert_to_long_loop(Node* cmp, Node* phi, IdealLoopTree* loop) {
  // Worklist of int iv nodes to convert, seeded with the loop exit compare.
  Unique_Node_List iv_nodes;
  // Maps the _idx of an original int node to its (initially unwired) long clone.
  Node_List old_new;
  iv_nodes.push(cmp);
  bool failed = false;

  // Phase 1: collect the iv subgraph (Phi/CmpI/AddI reachable from cmp via
  // loop-internal inputs) and create a long clone for each node. Bail out if
  // a CastII is encountered.
  for (uint i = 0; i < iv_nodes.size() && !failed; i++) {
    Node* n = iv_nodes.at(i);
    switch(n->Opcode()) {
      case Op_Phi: {
        Node* clone = new PhiNode(n->in(0), TypeLong::LONG);
        old_new.map(n->_idx, clone);
        break;
      }
      case Op_CmpI: {
        // Inputs are wired up in phase 2 below.
        Node* clone = new CmpLNode(nullptr, nullptr);
        old_new.map(n->_idx, clone);
        break;
      }
      case Op_AddI: {
        Node* clone = new AddLNode(nullptr, nullptr);
        old_new.map(n->_idx, clone);
        break;
      }
      case Op_CastII: {
        // No long counterpart is created for a CastII here: give up.
        failed = true;
        break;
      }
      default:
        DEBUG_ONLY(n->dump());
        fatal("unexpected");
    }

    // Push loop-internal inputs onto the worklist.
    // Note: this inner 'i' intentionally shadows the worklist index above.
    for (uint i = 1; i < n->req(); i++) {
      Node* in = n->in(i);
      if (in == nullptr) {
        continue;
      }
      if (loop->is_member(get_loop(get_ctrl(in)))) {
        iv_nodes.push(in);
      }
    }
  }

  if (failed) {
    // Conversion aborted: discard all clones created so far.
    for (uint i = 0; i < iv_nodes.size(); i++) {
      Node* n = iv_nodes.at(i);
      Node* clone = old_new[n->_idx];
      if (clone != nullptr) {
        _igvn.remove_dead_node(clone);
      }
    }
    return false;
  }

  // Phase 2: wire up each clone's inputs. Inputs that belong to the converted
  // subgraph use their clone; any other input is an int value (asserted) and
  // is widened with a ConvI2L.
  for (uint i = 0; i < iv_nodes.size(); i++) {
    Node* n = iv_nodes.at(i);
    Node* clone = old_new[n->_idx];
    for (uint i = 1; i < n->req(); i++) {
      Node* in = n->in(i);
      if (in == nullptr) {
        continue;
      }
      Node* in_clone = old_new[in->_idx];
      if (in_clone == nullptr) {
        assert(_igvn.type(in)->isa_int(), "");
        in_clone = new ConvI2LNode(in);
        _igvn.register_new_node_with_optimizer(in_clone);
        set_subtree_ctrl(in_clone, false);
      }
      if (in_clone->in(0) == nullptr) {
        // NOTE(review): temporarily parks top as in_clone's control input
        // while the edge is added, then clears it again — presumably to keep
        // the node from looking dead/floating during the hookup; confirm.
        in_clone->set_req(0, C->top());
        clone->set_req(i, in_clone);
        in_clone->set_req(0, nullptr);
      } else {
        clone->set_req(i, in_clone);
      }
    }
    _igvn.register_new_node_with_optimizer(clone);
  }
  set_ctrl(old_new[phi->_idx], phi->in(0));

  // Phase 3: redirect all uses outside the converted subgraph from each int
  // node to its long replacement. For the CmpI, users can consume the CmpL
  // clone directly; for other nodes the long value is narrowed back with a
  // ConvL2I (created lazily, once per node).
  for (uint i = 0; i < iv_nodes.size(); i++) {
    Node* n = iv_nodes.at(i);
    Node* clone = old_new[n->_idx];
    set_subtree_ctrl(clone, false);
    Node* m = n->Opcode() == Op_CmpI ? clone : nullptr;
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* u = n->fast_out(i);
      if (iv_nodes.member(u)) {
        continue;
      }
      if (m == nullptr) {
        m = new ConvL2INode(clone);
        _igvn.register_new_node_with_optimizer(m);
        set_subtree_ctrl(m, false);
      }
      _igvn.rehash_node_delayed(u);
      // replace_edge removed 'nb' edges; compensate the DU iterator bounds.
      int nb = u->replace_edge(n, m, &_igvn);
      --i, imax -= nb;
    }
  }
  return true;
}
1864 #endif
1865
1866 //------------------------------is_counted_loop--------------------------------
1867 bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_bt) {
1868 PhaseGVN *gvn = &_igvn;
1869
1870 Node* back_control = loop_exit_control(x, loop);
1871 if (back_control == nullptr) {
1872 return false;
1873 }
1874
1875 BoolTest::mask bt = BoolTest::illegal;
1876 float cl_prob = 0;
1877 Node* incr = nullptr;
1878 Node* limit = nullptr;
1879 Node* cmp = loop_exit_test(back_control, loop, incr, limit, bt, cl_prob);
1880 if (cmp == nullptr || cmp->Opcode() != Op_Cmp(iv_bt)) {
1881 return false; // Avoid pointer & float & 64-bit compares
1882 }
1883
1884 // Trip-counter increment must be commutative & associative.
1885 if (incr->Opcode() == Op_Cast(iv_bt)) {
1886 incr = incr->in(1);
1887 }
1888
1889 Node* phi_incr = nullptr;
1890 incr = loop_iv_incr(incr, x, loop, phi_incr);
1891 if (incr == nullptr) {
1892 return false;
1893 }
1894
1895 Node* trunc1 = nullptr;
1896 Node* trunc2 = nullptr;
1897 const TypeInteger* iv_trunc_t = nullptr;
1898 Node* orig_incr = incr;
1899 if (!(incr = CountedLoopNode::match_incr_with_optional_truncation(incr, &trunc1, &trunc2, &iv_trunc_t, iv_bt))) {
1900 return false; // Funny increment opcode
1901 }
1902 assert(incr->Opcode() == Op_Add(iv_bt), "wrong increment code");
1903
1904 Node* xphi = nullptr;
1905 Node* stride = loop_iv_stride(incr, xphi);
1906
1907 if (stride == nullptr) {
1908 return false;
1909 }
1910
1911 // Iteratively uncast the loop induction variable
1912 // until no more CastII/CastLL nodes are found.
1913 while (xphi->Opcode() == Op_Cast(iv_bt)) {
1914 xphi = xphi->in(1);
1915 }
1916
1917 // Stride must be constant
1918 jlong stride_con = stride->get_integer_as_long(iv_bt);
1919 assert(stride_con != 0, "missed some peephole opt");
1920
1921 PhiNode* phi = loop_iv_phi(xphi, phi_incr, x);
1922
1923 if (phi == nullptr ||
1924 (trunc1 == nullptr && phi->in(LoopNode::LoopBackControl) != incr) ||
1925 (trunc1 != nullptr && phi->in(LoopNode::LoopBackControl) != trunc1)) {
1926 return false;
1927 }
1928
1929 Node* iftrue = back_control;
1930 uint iftrue_op = iftrue->Opcode();
1931 Node* iff = iftrue->in(0);
1932 BoolNode* test = iff->in(1)->as_Bool();
1933
1934 const TypeInteger* limit_t = gvn->type(limit)->is_integer(iv_bt);
1935 if (trunc1 != nullptr) {
1936 // When there is a truncation, we must be sure that after the truncation
1937 // the trip counter will end up higher than the limit, otherwise we are looking
1938 // at an endless loop. Can happen with range checks.
1939
    // Example:
    // int i = 0;
    // while (true) {
    //   sum += array[i];
    //   i++;
    //   i = i & 0x7fff;
    // }
    //
    // If the array is shorter than 0x8000 this exits through an AIOOB
    //  - Counted loop transformation is ok
    // If the array is longer, then this is an endless loop
    //  - No transformation can be done.
1952
1953 const TypeInteger* incr_t = gvn->type(orig_incr)->is_integer(iv_bt);
1954 if (limit_t->hi_as_long() > incr_t->hi_as_long()) {
1955 // if the limit can have a higher value than the increment (before the phi)
1956 return false;
1957 }
1958 }
1959
1960 Node *init_trip = phi->in(LoopNode::EntryControl);
1961
1962 // If iv trunc type is smaller than int, check for possible wrap.
1963 if (!TypeInteger::bottom(iv_bt)->higher_equal(iv_trunc_t)) {
1964 assert(trunc1 != nullptr, "must have found some truncation");
1965
1966 // Get a better type for the phi (filtered thru if's)
1967 const TypeInteger* phi_ft = filtered_type(phi);
1968
1969 // Can iv take on a value that will wrap?
1970 //
1971 // Ensure iv's limit is not within "stride" of the wrap value.
1972 //
1973 // Example for "short" type
1974 // Truncation ensures value is in the range -32768..32767 (iv_trunc_t)
1975 // If the stride is +10, then the last value of the induction
1976 // variable before the increment (phi_ft->_hi) must be
1977 // <= 32767 - 10 and (phi_ft->_lo) must be >= -32768 to
1978 // ensure no truncation occurs after the increment.
1979
1980 if (stride_con > 0) {
1981 if (iv_trunc_t->hi_as_long() - phi_ft->hi_as_long() < stride_con ||
1982 iv_trunc_t->lo_as_long() > phi_ft->lo_as_long()) {
1983 return false; // truncation may occur
1984 }
1985 } else if (stride_con < 0) {
1986 if (iv_trunc_t->lo_as_long() - phi_ft->lo_as_long() > stride_con ||
1987 iv_trunc_t->hi_as_long() < phi_ft->hi_as_long()) {
1988 return false; // truncation may occur
1989 }
1990 }
1991 // No possibility of wrap so truncation can be discarded
1992 // Promote iv type to Int
1993 } else {
1994 assert(trunc1 == nullptr && trunc2 == nullptr, "no truncation for int");
1995 }
1996
1997 if (!condition_stride_ok(bt, stride_con)) {
1998 return false;
1999 }
2000
2001 const TypeInteger* init_t = gvn->type(init_trip)->is_integer(iv_bt);
2002
2003 if (stride_con > 0) {
2004 if (init_t->lo_as_long() > max_signed_integer(iv_bt) - stride_con) {
2005 return false; // cyclic loop
2006 }
2007 } else {
2008 if (init_t->hi_as_long() < min_signed_integer(iv_bt) - stride_con) {
2009 return false; // cyclic loop
2010 }
2011 }
2012
2013 if (phi_incr != nullptr && bt != BoolTest::ne) {
2014 // check if there is a possibility of IV overflowing after the first increment
2015 if (stride_con > 0) {
2016 if (init_t->hi_as_long() > max_signed_integer(iv_bt) - stride_con) {
2017 return false;
2018 }
2019 } else {
2020 if (init_t->lo_as_long() < min_signed_integer(iv_bt) - stride_con) {
2021 return false;
2022 }
2023 }
2024 }
2025
2026 // =================================================
2027 // ---- SUCCESS! Found A Trip-Counted Loop! -----
2028 //
2029
2030 if (x->Opcode() == Op_Region) {
2031 // x has not yet been transformed to Loop or LongCountedLoop.
2032 // This should only happen if we are inside an infinite loop.
2033 // It happens like this:
2034 // build_loop_tree -> do not attach infinite loop and nested loops
2035 // beautify_loops -> does not transform the infinite and nested loops to LoopNode, because not attached yet
2036 // build_loop_tree -> find and attach infinite and nested loops
2037 // counted_loop -> nested Regions are not yet transformed to LoopNodes, we land here
2038 assert(x->as_Region()->is_in_infinite_subgraph(),
2039 "x can only be a Region and not Loop if inside infinite loop");
2040 // Come back later when Region is transformed to LoopNode
2041 return false;
2042 }
2043
2044 assert(x->Opcode() == Op_Loop || x->Opcode() == Op_LongCountedLoop, "regular loops only");
2045 C->print_method(PHASE_BEFORE_CLOOPS, 3, x);
2046
2047 // ===================================================
2048 // We can only convert this loop to a counted loop if we can guarantee that the iv phi will never overflow at runtime.
2049 // This is an implicit assumption taken by some loop optimizations. We therefore must ensure this property at all cost.
2050 // At this point, we've already excluded some trivial cases where an overflow could have been proven statically.
2051 // But even though we cannot prove that an overflow will *not* happen, we still want to speculatively convert this loop
2052 // to a counted loop. This can be achieved by adding additional iv phi overflow checks before the loop. If they fail,
2053 // we trap and resume execution before the loop without having executed any iteration of the loop, yet.
2054 //
2055 // These additional iv phi overflow checks can be inserted as Loop Limit Check Predicates above the Loop Limit Check
2056 // Parse Predicate which captures a JVM state just before the entry of the loop. If there is no such Parse Predicate,
2057 // we cannot generate a Loop Limit Check Predicate and thus cannot speculatively convert the loop to a counted loop.
2058 //
  // In the following, we only focus on int loops with stride > 0 to keep things simple. The argumentation and proof
  // for stride < 0 is analogous. For long loops, we would replace max_int with max_long.
2061 //
2062 //
2063 // The loop to be converted does not always need to have the often used shape:
2064 //
2065 // i = init
2066 // i = init loop:
2067 // do { ...
2068 // // ... equivalent i+=stride
2069 // i+=stride <==> if (i < limit)
2070 // } while (i < limit); goto loop
2071 // exit:
2072 // ...
2073 //
2074 // where the loop exit check uses the post-incremented iv phi and a '<'-operator.
2075 //
2076 // We could also have '<='-operator (or '>='-operator for negative strides) or use the pre-incremented iv phi value
2077 // in the loop exit check:
2078 //
2079 // i = init
2080 // loop:
2081 // ...
2082 // if (i <= limit)
2083 // i+=stride
2084 // goto loop
2085 // exit:
2086 // ...
2087 //
2088 // Let's define the following terms:
2089 // - iv_pre_i: The pre-incremented iv phi before the i-th iteration.
2090 // - iv_post_i: The post-incremented iv phi after the i-th iteration.
2091 //
2092 // The iv_pre_i and iv_post_i have the following relation:
2093 // iv_pre_i + stride = iv_post_i
2094 //
2095 // When converting a loop to a counted loop, we want to have a canonicalized loop exit check of the form:
2096 // iv_post_i < adjusted_limit
2097 //
2098 // If that is not the case, we need to canonicalize the loop exit check by using different values for adjusted_limit:
2099 // (LE1) iv_post_i < limit: Already canonicalized. We can directly use limit as adjusted_limit.
2100 // -> adjusted_limit = limit.
2101 // (LE2) iv_post_i <= limit:
2102 // iv_post_i < limit + 1
2103 // -> adjusted limit = limit + 1
2104 // (LE3) iv_pre_i < limit:
2105 // iv_pre_i + stride < limit + stride
2106 // iv_post_i < limit + stride
2107 // -> adjusted_limit = limit + stride
2108 // (LE4) iv_pre_i <= limit:
2109 // iv_pre_i < limit + 1
2110 // iv_pre_i + stride < limit + stride + 1
2111 // iv_post_i < limit + stride + 1
2112 // -> adjusted_limit = limit + stride + 1
2113 //
2114 // Note that:
2115 // (AL) limit <= adjusted_limit.
2116 //
2117 // The following loop invariant has to hold for counted loops with n iterations (i.e. loop exit check true after n-th
2118 // loop iteration) and a canonicalized loop exit check to guarantee that no iv_post_i over- or underflows:
2119 // (INV) For i = 1..n, min_int <= iv_post_i <= max_int
2120 //
2121 // To prove (INV), we require the following two conditions/assumptions:
2122 // (i): adjusted_limit - 1 + stride <= max_int
2123 // (ii): init < limit
2124 //
2125 // If we can prove (INV), we know that there can be no over- or underflow of any iv phi value. We prove (INV) by
2126 // induction by assuming (i) and (ii).
2127 //
2128 // Proof by Induction
2129 // ------------------
2130 // > Base case (i = 1): We show that (INV) holds after the first iteration:
2131 // min_int <= iv_post_1 = init + stride <= max_int
2132 // Proof:
2133 // First, we note that (ii) implies
2134 // (iii) init <= limit - 1
2135 // max_int >= adjusted_limit - 1 + stride [using (i)]
2136 // >= limit - 1 + stride [using (AL)]
2137 // >= init + stride [using (iii)]
2138 // >= min_int [using stride > 0, no underflow]
2139 // Thus, no overflow happens after the first iteration and (INV) holds for i = 1.
2140 //
2141 // Note that to prove the base case we need (i) and (ii).
2142 //
2143 // > Induction Hypothesis (i = j, j > 1): Assume that (INV) holds after the j-th iteration:
2144 // min_int <= iv_post_j <= max_int
2145 // > Step case (i = j + 1): We show that (INV) also holds after the j+1-th iteration:
2146 // min_int <= iv_post_{j+1} = iv_post_j + stride <= max_int
2147 // Proof:
2148 // If iv_post_j >= adjusted_limit:
2149 // We exit the loop after the j-th iteration, and we don't execute the j+1-th iteration anymore. Thus, there is
2150 // also no iv_{j+1}. Since (INV) holds for iv_j, there is nothing left to prove.
2151 // If iv_post_j < adjusted_limit:
2152 // First, we note that:
2153 // (iv) iv_post_j <= adjusted_limit - 1
2154 // max_int >= adjusted_limit - 1 + stride [using (i)]
2155 // >= iv_post_j + stride [using (iv)]
2156 // >= min_int [using stride > 0, no underflow]
2157 //
2158 // Note that to prove the step case we only need (i).
2159 //
2160 // Thus, by assuming (i) and (ii), we proved (INV).
2161 //
2162 //
2163 // It is therefore enough to add the following two Loop Limit Check Predicates to check assumptions (i) and (ii):
2164 //
2165 // (1) Loop Limit Check Predicate for (i):
2166 // Using (i): adjusted_limit - 1 + stride <= max_int
2167 //
2168 // This condition is now restated to use limit instead of adjusted_limit:
2169 //
2170 // To prevent an overflow of adjusted_limit -1 + stride itself, we rewrite this check to
2171 // max_int - stride + 1 >= adjusted_limit
2172 // We can merge the two constants into
2173 // canonicalized_correction = stride - 1
2174 // which gives us
2175 // max_int - canonicalized_correction >= adjusted_limit
2176 //
2177 // To directly use limit instead of adjusted_limit in the predicate condition, we split adjusted_limit into:
2178 // adjusted_limit = limit + limit_correction
2179 // Since stride > 0 and limit_correction <= stride + 1, we can restate this with no over- or underflow into:
2180 // max_int - canonicalized_correction - limit_correction >= limit
2181 // Since canonicalized_correction and limit_correction are both constants, we can replace them with a new constant:
2182 // (v) final_correction = canonicalized_correction + limit_correction
2183 //
2184 // which gives us:
2185 //
2186 // Final predicate condition:
2187 // max_int - final_correction >= limit
2188 //
2189 // However, we need to be careful that (v) does not over- or underflow.
2190 // We know that:
2191 // canonicalized_correction = stride - 1
2192 // and
2193 // limit_correction <= stride + 1
2194 // and thus
2195 // canonicalized_correction + limit_correction <= 2 * stride
2196 // To prevent an over- or underflow of (v), we must ensure that
2197 // 2 * stride <= max_int
2198 // which can safely be checked without over- or underflow with
2199 // (vi) stride != min_int AND abs(stride) <= max_int / 2
2200 //
2201 // We could try to further optimize the cases where (vi) does not hold but given that such large strides are
2202 // very uncommon and the loop would only run for a very few iterations anyway, we simply bail out if (vi) fails.
2203 //
2204 // (2) Loop Limit Check Predicate for (ii):
2205 // Using (ii): init < limit
2206 //
2207 // This Loop Limit Check Predicate is not required if we can prove at compile time that either:
2208 // (2.1) type(init) < type(limit)
2209 // In this case, we know:
2210 // all possible values of init < all possible values of limit
2211 // and we can skip the predicate.
2212 //
2213 // (2.2) init < limit is already checked before (i.e. found as a dominating check)
2214 // In this case, we do not need to re-check the condition and can skip the predicate.
2215 // This is often found for while- and for-loops which have the following shape:
2216 //
2217 // if (init < limit) { // Dominating test. Do not need the Loop Limit Check Predicate below.
2218 // i = init;
2219 // if (init >= limit) { trap(); } // Here we would insert the Loop Limit Check Predicate
2220 // do {
2221 // i += stride;
2222 // } while (i < limit);
2223 // }
2224 //
2225 // (2.3) init + stride <= max_int
2226 // In this case, there is no overflow of the iv phi after the first loop iteration.
2227 // In the proof of the base case above we showed that init + stride <= max_int by using assumption (ii):
2228 // init < limit
2229 // In the proof of the step case above, we did not need (ii) anymore. Therefore, if we already know at
2230 // compile time that init + stride <= max_int then we have trivially proven the base case and that
2231 // there is no overflow of the iv phi after the first iteration. In this case, we don't need to check (ii)
2232 // again and can skip the predicate.
2233
2234 // Check (vi) and bail out if the stride is too big.
2235 if (stride_con == min_signed_integer(iv_bt) || (ABS(stride_con) > max_signed_integer(iv_bt) / 2)) {
2236 return false;
2237 }
2238
2239 // Accounting for (LE3) and (LE4) where we use pre-incremented phis in the loop exit check.
2240 const jlong limit_correction_for_pre_iv_exit_check = (phi_incr != nullptr) ? stride_con : 0;
2241
2242 // Accounting for (LE2) and (LE4) where we use <= or >= in the loop exit check.
2243 const bool includes_limit = (bt == BoolTest::le || bt == BoolTest::ge);
2244 const jlong limit_correction_for_le_ge_exit_check = (includes_limit ? (stride_con > 0 ? 1 : -1) : 0);
2245
2246 const jlong limit_correction = limit_correction_for_pre_iv_exit_check + limit_correction_for_le_ge_exit_check;
2247 const jlong canonicalized_correction = stride_con + (stride_con > 0 ? -1 : 1);
2248 const jlong final_correction = canonicalized_correction + limit_correction;
2249
2250 int sov = check_stride_overflow(final_correction, limit_t, iv_bt);
2251 Node* init_control = x->in(LoopNode::EntryControl);
2252
2253 // If sov==0, limit's type always satisfies the condition, for
2254 // example, when it is an array length.
2255 if (sov != 0) {
2256 if (sov < 0) {
2257 return false; // Bailout: integer overflow is certain.
2258 }
2259 // (1) Loop Limit Check Predicate is required because we could not statically prove that
2260 // limit + final_correction = adjusted_limit - 1 + stride <= max_int
2261 assert(!x->as_Loop()->is_loop_nest_inner_loop(), "loop was transformed");
2262 const Predicates predicates(init_control);
2263 const PredicateBlock* loop_limit_check_predicate_block = predicates.loop_limit_check_predicate_block();
2264 if (!loop_limit_check_predicate_block->has_parse_predicate()) {
2265 // The Loop Limit Check Parse Predicate is not generated if this method trapped here before.
2266 #ifdef ASSERT
2267 if (TraceLoopLimitCheck) {
2268 tty->print("Missing Loop Limit Check Parse Predicate:");
2269 loop->dump_head();
2270 x->dump(1);
2271 }
2272 #endif
2273 return false;
2274 }
2275
2276 ParsePredicateNode* loop_limit_check_parse_predicate = loop_limit_check_predicate_block->parse_predicate();
2277 if (!is_dominator(get_ctrl(limit), loop_limit_check_parse_predicate->in(0))) {
2278 return false;
2279 }
2280
2281 Node* cmp_limit;
2282 Node* bol;
2283
2284 if (stride_con > 0) {
2285 cmp_limit = CmpNode::make(limit, _igvn.integercon(max_signed_integer(iv_bt) - final_correction, iv_bt), iv_bt);
2286 bol = new BoolNode(cmp_limit, BoolTest::le);
2287 } else {
2288 cmp_limit = CmpNode::make(limit, _igvn.integercon(min_signed_integer(iv_bt) - final_correction, iv_bt), iv_bt);
2289 bol = new BoolNode(cmp_limit, BoolTest::ge);
2290 }
2291
2292 insert_loop_limit_check_predicate(init_control->as_IfTrue(), cmp_limit, bol);
2293 }
2294
2295 // (2.3)
2296 const bool init_plus_stride_could_overflow =
2297 (stride_con > 0 && init_t->hi_as_long() > max_signed_integer(iv_bt) - stride_con) ||
2298 (stride_con < 0 && init_t->lo_as_long() < min_signed_integer(iv_bt) - stride_con);
2299 // (2.1)
2300 const bool init_gte_limit = (stride_con > 0 && init_t->hi_as_long() >= limit_t->lo_as_long()) ||
2301 (stride_con < 0 && init_t->lo_as_long() <= limit_t->hi_as_long());
2302
2303 if (init_gte_limit && // (2.1)
2304 ((bt == BoolTest::ne || init_plus_stride_could_overflow) && // (2.3)
2305 !has_dominating_loop_limit_check(init_trip, limit, stride_con, iv_bt, init_control))) { // (2.2)
2306 // (2) Iteration Loop Limit Check Predicate is required because neither (2.1), (2.2), nor (2.3) holds.
2307 // We use the following condition:
2308 // - stride > 0: init < limit
2309 // - stride < 0: init > limit
2310 //
2311 // This predicate is always required if we have a non-equal-operator in the loop exit check (where stride = 1 is
2312 // a requirement). We transform the loop exit check by using a less-than-operator. By doing so, we must always
2313 // check that init < limit. Otherwise, we could have a different number of iterations at runtime.
2314
2315 const Predicates predicates(init_control);
2316 const PredicateBlock* loop_limit_check_predicate_block = predicates.loop_limit_check_predicate_block();
2317 if (!loop_limit_check_predicate_block->has_parse_predicate()) {
2318 // The Loop Limit Check Parse Predicate is not generated if this method trapped here before.
2319 #ifdef ASSERT
2320 if (TraceLoopLimitCheck) {
2321 tty->print("Missing Loop Limit Check Parse Predicate:");
2322 loop->dump_head();
2323 x->dump(1);
2324 }
2325 #endif
2326 return false;
2327 }
2328
2329 ParsePredicateNode* loop_limit_check_parse_predicate = loop_limit_check_predicate_block->parse_predicate();
2330 Node* parse_predicate_entry = loop_limit_check_parse_predicate->in(0);
2331 if (!is_dominator(get_ctrl(limit), parse_predicate_entry) ||
2332 !is_dominator(get_ctrl(init_trip), parse_predicate_entry)) {
2333 return false;
2334 }
2335
2336 Node* cmp_limit;
2337 Node* bol;
2338
2339 if (stride_con > 0) {
2340 cmp_limit = CmpNode::make(init_trip, limit, iv_bt);
2341 bol = new BoolNode(cmp_limit, BoolTest::lt);
2342 } else {
2343 cmp_limit = CmpNode::make(init_trip, limit, iv_bt);
2344 bol = new BoolNode(cmp_limit, BoolTest::gt);
2345 }
2346
2347 insert_loop_limit_check_predicate(init_control->as_IfTrue(), cmp_limit, bol);
2348 }
2349
2350 if (bt == BoolTest::ne) {
2351 // Now we need to canonicalize the loop condition if it is 'ne'.
2352 assert(stride_con == 1 || stride_con == -1, "simple increment only - checked before");
2353 if (stride_con > 0) {
2354 // 'ne' can be replaced with 'lt' only when init < limit. This is ensured by the inserted predicate above.
2355 bt = BoolTest::lt;
2356 } else {
2357 assert(stride_con < 0, "must be");
2358 // 'ne' can be replaced with 'gt' only when init > limit. This is ensured by the inserted predicate above.
2359 bt = BoolTest::gt;
2360 }
2361 }
2362
2363 Node* sfpt = nullptr;
2364 if (loop->_child == nullptr) {
2365 sfpt = find_safepoint(back_control, x, loop);
2366 } else {
2367 sfpt = iff->in(0);
2368 if (sfpt->Opcode() != Op_SafePoint) {
2369 sfpt = nullptr;
2370 }
2371 }
2372
2373 if (x->in(LoopNode::LoopBackControl)->Opcode() == Op_SafePoint) {
2374 Node* backedge_sfpt = x->in(LoopNode::LoopBackControl);
2375 if (((iv_bt == T_INT && LoopStripMiningIter != 0) ||
2376 iv_bt == T_LONG) &&
2377 sfpt == nullptr) {
2378 // Leaving the safepoint on the backedge and creating a
2379 // CountedLoop will confuse optimizations. We can't move the
2380 // safepoint around because its jvm state wouldn't match a new
2381 // location. Give up on that loop.
2382 return false;
2383 }
2384 if (is_deleteable_safept(backedge_sfpt)) {
2385 lazy_replace(backedge_sfpt, iftrue);
2386 if (loop->_safepts != nullptr) {
2387 loop->_safepts->yank(backedge_sfpt);
2388 }
2389 loop->_tail = iftrue;
2390 }
2391 }
2392
2393
2394 #ifdef ASSERT
2395 if (iv_bt == T_INT &&
2396 !x->as_Loop()->is_loop_nest_inner_loop() &&
2397 StressLongCountedLoop > 0 &&
2398 trunc1 == nullptr &&
2399 convert_to_long_loop(cmp, phi, loop)) {
2400 return false;
2401 }
2402 #endif
2403
2404 Node* adjusted_limit = limit;
2405 if (phi_incr != nullptr) {
2406 // If compare points directly to the phi we need to adjust
// the compare so that it points to the incr. Limit has
2408 // to be adjusted to keep trip count the same and we
2409 // should avoid int overflow.
2410 //
2411 // i = init; do {} while(i++ < limit);
2412 // is converted to
2413 // i = init; do {} while(++i < limit+1);
2414 //
2415 adjusted_limit = gvn->transform(AddNode::make(limit, stride, iv_bt));
2416 }
2417
2418 if (includes_limit) {
// The limit check guarantees that 'limit <= (max_jint - stride)' so
2420 // we can convert 'i <= limit' to 'i < limit+1' since stride != 0.
2421 //
2422 Node* one = (stride_con > 0) ? gvn->integercon( 1, iv_bt) : gvn->integercon(-1, iv_bt);
2423 adjusted_limit = gvn->transform(AddNode::make(adjusted_limit, one, iv_bt));
2424 if (bt == BoolTest::le)
2425 bt = BoolTest::lt;
2426 else if (bt == BoolTest::ge)
2427 bt = BoolTest::gt;
2428 else
2429 ShouldNotReachHere();
2430 }
2431 set_subtree_ctrl(adjusted_limit, false);
2432
2433 // Build a canonical trip test.
2434 // Clone code, as old values may be in use.
2435 incr = incr->clone();
2436 incr->set_req(1,phi);
2437 incr->set_req(2,stride);
2438 incr = _igvn.register_new_node_with_optimizer(incr);
2439 set_early_ctrl(incr, false);
2440 _igvn.rehash_node_delayed(phi);
2441 phi->set_req_X( LoopNode::LoopBackControl, incr, &_igvn );
2442
2443 // If phi type is more restrictive than Int, raise to
2444 // Int to prevent (almost) infinite recursion in igvn
2445 // which can only handle integer types for constants or minint..maxint.
2446 if (!TypeInteger::bottom(iv_bt)->higher_equal(phi->bottom_type())) {
2447 Node* nphi = PhiNode::make(phi->in(0), phi->in(LoopNode::EntryControl), TypeInteger::bottom(iv_bt));
2448 nphi->set_req(LoopNode::LoopBackControl, phi->in(LoopNode::LoopBackControl));
2449 nphi = _igvn.register_new_node_with_optimizer(nphi);
2450 set_ctrl(nphi, get_ctrl(phi));
2451 _igvn.replace_node(phi, nphi);
2452 phi = nphi->as_Phi();
2453 }
2454 cmp = cmp->clone();
2455 cmp->set_req(1,incr);
2456 cmp->set_req(2, adjusted_limit);
2457 cmp = _igvn.register_new_node_with_optimizer(cmp);
2458 set_ctrl(cmp, iff->in(0));
2459
2460 test = test->clone()->as_Bool();
2461 (*(BoolTest*)&test->_test)._test = bt;
2462 test->set_req(1,cmp);
2463 _igvn.register_new_node_with_optimizer(test);
2464 set_ctrl(test, iff->in(0));
2465
2466 // Replace the old IfNode with a new LoopEndNode
2467 Node *lex = _igvn.register_new_node_with_optimizer(BaseCountedLoopEndNode::make(iff->in(0), test, cl_prob, iff->as_If()->_fcnt, iv_bt));
2468 IfNode *le = lex->as_If();
2469 uint dd = dom_depth(iff);
2470 set_idom(le, le->in(0), dd); // Update dominance for loop exit
2471 set_loop(le, loop);
2472
2473 // Get the loop-exit control
2474 Node *iffalse = iff->as_If()->proj_out(!(iftrue_op == Op_IfTrue));
2475
2476 // Need to swap loop-exit and loop-back control?
2477 if (iftrue_op == Op_IfFalse) {
2478 Node *ift2=_igvn.register_new_node_with_optimizer(new IfTrueNode (le));
2479 Node *iff2=_igvn.register_new_node_with_optimizer(new IfFalseNode(le));
2480
2481 loop->_tail = back_control = ift2;
2482 set_loop(ift2, loop);
2483 set_loop(iff2, get_loop(iffalse));
2484
2485 // Lazy update of 'get_ctrl' mechanism.
2486 lazy_replace(iffalse, iff2);
2487 lazy_replace(iftrue, ift2);
2488
2489 // Swap names
2490 iffalse = iff2;
2491 iftrue = ift2;
2492 } else {
2493 _igvn.rehash_node_delayed(iffalse);
2494 _igvn.rehash_node_delayed(iftrue);
2495 iffalse->set_req_X( 0, le, &_igvn );
2496 iftrue ->set_req_X( 0, le, &_igvn );
2497 }
2498
2499 set_idom(iftrue, le, dd+1);
2500 set_idom(iffalse, le, dd+1);
2501 assert(iff->outcnt() == 0, "should be dead now");
2502 lazy_replace( iff, le ); // fix 'get_ctrl'
2503
2504 Node* entry_control = init_control;
2505 bool strip_mine_loop = iv_bt == T_INT &&
2506 loop->_child == nullptr &&
2507 sfpt != nullptr &&
2508 !loop->_has_call &&
2509 is_deleteable_safept(sfpt);
2510 IdealLoopTree* outer_ilt = nullptr;
2511 if (strip_mine_loop) {
2512 outer_ilt = create_outer_strip_mined_loop(init_control, loop, cl_prob, le->_fcnt,
2513 entry_control, iffalse);
2514 }
2515
2516 // Now setup a new CountedLoopNode to replace the existing LoopNode
2517 BaseCountedLoopNode *l = BaseCountedLoopNode::make(entry_control, back_control, iv_bt);
2518 l->set_unswitch_count(x->as_Loop()->unswitch_count()); // Preserve
2519 // The following assert is approximately true, and defines the intention
2520 // of can_be_counted_loop. It fails, however, because phase->type
2521 // is not yet initialized for this loop and its parts.
2522 //assert(l->can_be_counted_loop(this), "sanity");
2523 _igvn.register_new_node_with_optimizer(l);
2524 set_loop(l, loop);
2525 loop->_head = l;
2526 // Fix all data nodes placed at the old loop head.
2527 // Uses the lazy-update mechanism of 'get_ctrl'.
2528 lazy_replace( x, l );
2529 set_idom(l, entry_control, dom_depth(entry_control) + 1);
2530
2531 if (iv_bt == T_INT && (LoopStripMiningIter == 0 || strip_mine_loop)) {
2532 // Check for immediately preceding SafePoint and remove
2533 if (sfpt != nullptr && (strip_mine_loop || is_deleteable_safept(sfpt))) {
2534 if (strip_mine_loop) {
2535 Node* outer_le = outer_ilt->_tail->in(0);
2536 Node* sfpt_clone = sfpt->clone();
2537 sfpt_clone->set_req(0, iffalse);
2538 outer_le->set_req(0, sfpt_clone);
2539
2540 Node* polladdr = sfpt_clone->in(TypeFunc::Parms);
2541 if (polladdr != nullptr && polladdr->is_Load()) {
2542 // Polling load should be pinned outside inner loop.
2543 Node* new_polladdr = polladdr->clone();
2544 new_polladdr->set_req(0, iffalse);
2545 _igvn.register_new_node_with_optimizer(new_polladdr, polladdr);
2546 set_ctrl(new_polladdr, iffalse);
2547 sfpt_clone->set_req(TypeFunc::Parms, new_polladdr);
2548 }
2549 // When this code runs, loop bodies have not yet been populated.
2550 const bool body_populated = false;
2551 register_control(sfpt_clone, outer_ilt, iffalse, body_populated);
2552 set_idom(outer_le, sfpt_clone, dom_depth(sfpt_clone));
2553 }
2554 lazy_replace(sfpt, sfpt->in(TypeFunc::Control));
2555 if (loop->_safepts != nullptr) {
2556 loop->_safepts->yank(sfpt);
2557 }
2558 }
2559 }
2560
2561 #ifdef ASSERT
2562 assert(l->is_valid_counted_loop(iv_bt), "counted loop shape is messed up");
2563 assert(l == loop->_head && l->phi() == phi && l->loopexit_or_null() == lex, "" );
2564 #endif
2565 #ifndef PRODUCT
2566 if (TraceLoopOpts) {
2567 tty->print("Counted ");
2568 loop->dump_head();
2569 }
2570 #endif
2571
2572 C->print_method(PHASE_AFTER_CLOOPS, 3, l);
2573
2574 // Capture bounds of the loop in the induction variable Phi before
2575 // subsequent transformation (iteration splitting) obscures the
2576 // bounds
2577 l->phi()->as_Phi()->set_type(l->phi()->Value(&_igvn));
2578
2579 if (strip_mine_loop) {
2580 l->mark_strip_mined();
2581 l->verify_strip_mined(1);
2582 outer_ilt->_head->as_Loop()->verify_strip_mined(1);
2583 loop = outer_ilt;
2584 }
2585
2586 #ifndef PRODUCT
2587 if (x->as_Loop()->is_loop_nest_inner_loop() && iv_bt == T_LONG) {
2588 AtomicAccess::inc(&_long_loop_counted_loops);
2589 }
2590 #endif
2591 if (iv_bt == T_LONG && x->as_Loop()->is_loop_nest_outer_loop()) {
2592 l->mark_loop_nest_outer_loop();
2593 }
2594
2595 return true;
2596 }
2597
2598 // Check if there is a dominating loop limit check of the form 'init < limit' starting at the loop entry.
2599 // If there is one, then we do not need to create an additional Loop Limit Check Predicate.
2600 bool PhaseIdealLoop::has_dominating_loop_limit_check(Node* init_trip, Node* limit, const jlong stride_con,
2601 const BasicType iv_bt, Node* loop_entry) {
2602 // Eagerly call transform() on the Cmp and Bool node to common them up if possible. This is required in order to
2603 // successfully find a dominated test with the If node below.
2604 Node* cmp_limit;
2605 Node* bol;
2606 if (stride_con > 0) {
2607 cmp_limit = _igvn.transform(CmpNode::make(init_trip, limit, iv_bt));
2608 bol = _igvn.transform(new BoolNode(cmp_limit, BoolTest::lt));
2609 } else {
2610 cmp_limit = _igvn.transform(CmpNode::make(init_trip, limit, iv_bt));
2611 bol = _igvn.transform(new BoolNode(cmp_limit, BoolTest::gt));
2612 }
2613
2614 // Check if there is already a dominating init < limit check. If so, we do not need a Loop Limit Check Predicate.
2615 IfNode* iff = new IfNode(loop_entry, bol, PROB_MIN, COUNT_UNKNOWN);
2616 // Also add fake IfProj nodes in order to call transform() on the newly created IfNode.
2617 IfFalseNode* if_false = new IfFalseNode(iff);
2618 IfTrueNode* if_true = new IfTrueNode(iff);
2619 Node* dominated_iff = _igvn.transform(iff);
2620 // ConI node? Found dominating test (IfNode::dominated_by() returns a ConI node).
2621 const bool found_dominating_test = dominated_iff != nullptr && dominated_iff->is_ConI();
2622
2623 // Kill the If with its projections again in the next IGVN round by cutting it off from the graph.
2624 _igvn.replace_input_of(iff, 0, C->top());
2625 _igvn.replace_input_of(iff, 1, C->top());
2626 return found_dominating_test;
2627 }
2628
2629 //----------------------exact_limit-------------------------------------------
2630 Node* PhaseIdealLoop::exact_limit( IdealLoopTree *loop ) {
2631 assert(loop->_head->is_CountedLoop(), "");
2632 CountedLoopNode *cl = loop->_head->as_CountedLoop();
2633 assert(cl->is_valid_counted_loop(T_INT), "");
2634
2635 if (cl->stride_con() == 1 ||
2636 cl->stride_con() == -1 ||
2637 cl->limit()->Opcode() == Op_LoopLimit) {
2638 // Old code has exact limit (it could be incorrect in case of int overflow).
2639 // Loop limit is exact with stride == 1. And loop may already have exact limit.
2640 return cl->limit();
2641 }
2642 Node *limit = nullptr;
2643 #ifdef ASSERT
2644 BoolTest::mask bt = cl->loopexit()->test_trip();
2645 assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
2646 #endif
2647 if (cl->has_exact_trip_count()) {
2648 // Simple case: loop has constant boundaries.
2649 // Use jlongs to avoid integer overflow.
2650 int stride_con = cl->stride_con();
2651 jlong init_con = cl->init_trip()->get_int();
2652 jlong limit_con = cl->limit()->get_int();
2653 julong trip_cnt = cl->trip_count();
2654 jlong final_con = init_con + trip_cnt*stride_con;
2655 int final_int = (int)final_con;
2656 // The final value should be in integer range since the loop
2657 // is counted and the limit was checked for overflow.
2658 assert(final_con == (jlong)final_int, "final value should be integer");
2659 limit = _igvn.intcon(final_int);
2660 } else {
2661 // Create new LoopLimit node to get exact limit (final iv value).
2662 limit = new LoopLimitNode(C, cl->init_trip(), cl->limit(), cl->stride());
2663 register_new_node(limit, cl->in(LoopNode::EntryControl));
2664 }
2665 assert(limit != nullptr, "sanity");
2666 return limit;
2667 }
2668
2669 //------------------------------Ideal------------------------------------------
2670 // Return a node which is more "ideal" than the current node.
2671 // Attempt to convert into a counted-loop.
2672 Node *LoopNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2673 if (!can_be_counted_loop(phase) && !is_OuterStripMinedLoop()) {
2674 phase->C->set_major_progress();
2675 }
2676 return RegionNode::Ideal(phase, can_reshape);
2677 }
2678
#ifdef ASSERT
// Verify the shape of a strip mined loop nest: the inner CountedLoop and its
// OuterStripMinedLoop must be properly paired, the outer backedge must run
// through an OuterStripMinedLoopEnd fed by a SafePoint, and only phis may sit
// between the two loop heads.
// expect_skeleton: 1 = an empty (skeleton) outer loop is expected,
//                  0 = it is not, -1 = either is acceptable.
void LoopNode::verify_strip_mined(int expect_skeleton) const {
  const OuterStripMinedLoopNode* outer = nullptr;
  const CountedLoopNode* inner = nullptr;
  if (is_strip_mined()) {
    if (!is_valid_counted_loop(T_INT)) {
      return; // Skip malformed counted loop
    }
    assert(is_CountedLoop(), "no Loop should be marked strip mined");
    inner = as_CountedLoop();
    // The outer loop is the direct entry control of the inner loop.
    outer = inner->in(LoopNode::EntryControl)->as_OuterStripMinedLoop();
  } else if (is_OuterStripMinedLoop()) {
    outer = this->as_OuterStripMinedLoop();
    // The inner loop is the unique control use of the outer loop head.
    inner = outer->unique_ctrl_out()->as_CountedLoop();
    assert(inner->is_valid_counted_loop(T_INT) && inner->is_strip_mined(), "OuterStripMinedLoop should have been removed");
    assert(!is_strip_mined(), "outer loop shouldn't be marked strip mined");
  }
  if (inner != nullptr || outer != nullptr) {
    assert(inner != nullptr && outer != nullptr, "missing loop in strip mined nest");
    // Walk the outer backedge: IfTrue <- OuterStripMinedLoopEnd <- SafePoint.
    Node* outer_tail = outer->in(LoopNode::LoopBackControl);
    Node* outer_le = outer_tail->in(0);
    assert(outer_le->Opcode() == Op_OuterStripMinedLoopEnd, "tail of outer loop should be an If");
    Node* sfpt = outer_le->in(0);
    assert(sfpt->Opcode() == Op_SafePoint, "where's the safepoint?");
    Node* inner_out = sfpt->in(0);
    CountedLoopEndNode* cle = inner_out->in(0)->as_CountedLoopEnd();
    assert(cle == inner->loopexit_or_null(), "mismatch");
    // A skeleton outer loop is recognized by a constant-0 loop end condition.
    bool has_skeleton = outer_le->in(1)->bottom_type()->singleton() && outer_le->in(1)->bottom_type()->is_int()->get_con() == 0;
    if (has_skeleton) {
      assert(expect_skeleton == 1 || expect_skeleton == -1, "unexpected skeleton node");
      assert(outer->outcnt() == 2, "only control nodes");
    } else {
      assert(expect_skeleton == 0 || expect_skeleton == -1, "no skeleton node?");
      uint phis = 0;
      uint be_loads = 0;
      Node* be = inner->in(LoopNode::LoopBackControl);
      // Count the inner loop's phis, and how many of them are reached from
      // loads pinned on the backedge.
      for (DUIterator_Fast imax, i = inner->fast_outs(imax); i < imax; i++) {
        Node* u = inner->fast_out(i);
        if (u->is_Phi()) {
          phis++;
          for (DUIterator_Fast jmax, j = be->fast_outs(jmax); j < jmax; j++) {
            Node* n = be->fast_out(j);
            if (n->is_Load()) {
              assert(n->in(0) == be || n->find_prec_edge(be) > 0, "should be on the backedge");
              // Follow the first-out chain until a Phi is reached.
              do {
                n = n->raw_out(0);
              } while (!n->is_Phi());
              if (n == u) {
                be_loads++;
                break;
              }
            }
          }
        }
      }
      assert(be_loads <= phis, "wrong number phis that depends on a pinned load");
      // Nothing but phis (and the loops themselves) may use the outer head.
      for (DUIterator_Fast imax, i = outer->fast_outs(imax); i < imax; i++) {
        Node* u = outer->fast_out(i);
        assert(u == outer || u == inner || u->is_Phi(), "nothing between inner and outer loop");
      }
      uint stores = 0;
      for (DUIterator_Fast imax, i = inner_out->fast_outs(imax); i < imax; i++) {
        Node* u = inner_out->fast_out(i);
        if (u->is_Store()) {
          stores++;
        }
      }
      // Late optimization of loads on backedge can cause Phi of outer loop to be eliminated but Phi of inner loop is
      // not guaranteed to be optimized out.
      assert(outer->outcnt() >= phis + 2 - be_loads && outer->outcnt() <= phis + 2 + stores + 1, "only phis");
    }
    assert(sfpt->outcnt() == 1, "no data node");
    assert(outer_tail->outcnt() == 1 || !has_skeleton, "no data node");
  }
}
#endif
2755
2756 //=============================================================================
2757 //------------------------------Ideal------------------------------------------
2758 // Return a node which is more "ideal" than the current node.
2759 // Attempt to convert into a counted-loop.
2760 Node *CountedLoopNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2761 return RegionNode::Ideal(phase, can_reshape);
2762 }
2763
2764 //------------------------------dump_spec--------------------------------------
2765 // Dump special per-node info
2766 #ifndef PRODUCT
2767 void CountedLoopNode::dump_spec(outputStream *st) const {
2768 LoopNode::dump_spec(st);
2769 if (stride_is_con()) {
2770 st->print("stride: %d ",stride_con());
2771 }
2772 if (is_pre_loop ()) st->print("pre of N%d" , _main_idx);
2773 if (is_main_loop()) st->print("main of N%d", _idx);
2774 if (is_post_loop()) st->print("post of N%d", _main_idx);
2775 if (is_strip_mined()) st->print(" strip mined");
2776 if (is_multiversion_fast_loop()) { st->print(" multiversion_fast"); }
2777 if (is_multiversion_slow_loop()) { st->print(" multiversion_slow"); }
2778 if (is_multiversion_delayed_slow_loop()) { st->print(" multiversion_delayed_slow"); }
2779 }
2780 #endif
2781
2782 //=============================================================================
2783 jlong BaseCountedLoopEndNode::stride_con() const {
2784 return stride()->bottom_type()->is_integer(bt())->get_con_as_long(bt());
2785 }
2786
2787
2788 BaseCountedLoopEndNode* BaseCountedLoopEndNode::make(Node* control, Node* test, float prob, float cnt, BasicType bt) {
2789 if (bt == T_INT) {
2790 return new CountedLoopEndNode(control, test, prob, cnt);
2791 }
2792 assert(bt == T_LONG, "unsupported");
2793 return new LongCountedLoopEndNode(control, test, prob, cnt);
2794 }
2795
2796 //=============================================================================
2797 //------------------------------Value-----------------------------------------
2798 const Type* LoopLimitNode::Value(PhaseGVN* phase) const {
2799 const Type* init_t = phase->type(in(Init));
2800 const Type* limit_t = phase->type(in(Limit));
2801 const Type* stride_t = phase->type(in(Stride));
2802 // Either input is TOP ==> the result is TOP
2803 if (init_t == Type::TOP) return Type::TOP;
2804 if (limit_t == Type::TOP) return Type::TOP;
2805 if (stride_t == Type::TOP) return Type::TOP;
2806
2807 int stride_con = stride_t->is_int()->get_con();
2808 if (stride_con == 1)
2809 return bottom_type(); // Identity
2810
2811 if (init_t->is_int()->is_con() && limit_t->is_int()->is_con()) {
2812 // Use jlongs to avoid integer overflow.
2813 jlong init_con = init_t->is_int()->get_con();
2814 jlong limit_con = limit_t->is_int()->get_con();
2815 int stride_m = stride_con - (stride_con > 0 ? 1 : -1);
2816 jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
2817 jlong final_con = init_con + stride_con*trip_count;
2818 int final_int = (int)final_con;
2819 // The final value should be in integer range in almost all cases,
2820 // since the loop is counted and the limit was checked for overflow.
2821 // There some exceptions, for example:
2822 // - During CCP, there might be a temporary overflow from PhiNodes, see JDK-8309266.
2823 // - During PhaseIdealLoop::split_thru_phi, the LoopLimitNode floats possibly far above
2824 // the loop and its predicates, and we might get constants on one side of the phi that
2825 // would lead to overflows. Such a code path would never lead us to enter the loop
2826 // because of the loop limit overflow check that happens after the LoopLimitNode
2827 // computation with overflow, but before we enter the loop, see JDK-8335747.
2828 if (final_con == (jlong)final_int) {
2829 return TypeInt::make(final_int);
2830 } else {
2831 return bottom_type();
2832 }
2833 }
2834
2835 return bottom_type(); // TypeInt::INT
2836 }
2837
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
// After loop opts are over, expand LoopLimit into explicit arithmetic that
// computes the exact final iv value: init + stride * trip_count.
Node *LoopLimitNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (phase->type(in(Init)) == Type::TOP ||
      phase->type(in(Limit)) == Type::TOP ||
      phase->type(in(Stride)) == Type::TOP)
    return nullptr;  // Dead

  int stride_con = phase->type(in(Stride))->is_int()->get_con();
  if (stride_con == 1)
    return nullptr;  // Identity

  // Delay following optimizations until all loop optimizations
  // done to keep Ideal graph simple.
  if (!can_reshape || !phase->C->post_loop_opts_phase()) {
    phase->C->record_for_post_loop_opts_igvn(this);
    return nullptr;
  }

  const TypeInt* init_t = phase->type(in(Init) )->is_int();
  const TypeInt* limit_t = phase->type(in(Limit))->is_int();
  jlong stride_p;
  jlong lim, ini;
  julong max;
  // Normalize to a positive stride magnitude; for a negative stride the roles
  // of init and limit are swapped in the range computation below.
  if (stride_con > 0) {
    stride_p = stride_con;
    lim = limit_t->_hi;
    ini = init_t->_lo;
    max = (julong)max_jint;
  } else {
    stride_p = -(jlong)stride_con;
    lim = init_t->_hi;
    ini = limit_t->_lo;
    max = (julong)(juint)min_jint; // double cast to get 0x0000000080000000, not 0xffffffff80000000
  }
  julong range = lim - ini + stride_p;
  if (range <= max) {
    // Convert to integer expression if it is not overflow.
    Node* stride_m = phase->intcon(stride_con - (stride_con > 0 ? 1 : -1));
    // NOTE: this Node* 'range' shadows the julong 'range' computed above.
    Node *range = phase->transform(new SubINode(in(Limit), in(Init)));
    Node *bias = phase->transform(new AddINode(range, stride_m));
    Node *trip = phase->transform(new DivINode(nullptr, bias, in(Stride)));
    Node *span = phase->transform(new MulINode(trip, in(Stride)));
    return new AddINode(span, in(Init)); // exact limit
  }

  if (is_power_of_2(stride_p) ||                // divisor is 2^n
      !Matcher::has_match_rule(Op_LoopLimit)) { // or no specialized Mach node?
    // Convert to long expression to avoid integer overflow
    // and let igvn optimizer convert this division.
    //
    Node* init = phase->transform( new ConvI2LNode(in(Init)));
    Node* limit = phase->transform( new ConvI2LNode(in(Limit)));
    Node* stride = phase->longcon(stride_con);
    Node* stride_m = phase->longcon(stride_con - (stride_con > 0 ? 1 : -1));

    Node *range = phase->transform(new SubLNode(limit, init));
    Node *bias = phase->transform(new AddLNode(range, stride_m));
    Node *span;
    if (stride_con > 0 && is_power_of_2(stride_p)) {
      // bias >= 0 if stride >0, so if stride is 2^n we can use &(-stride)
      // and avoid generating rounding for division. Zero trip guard should
      // guarantee that init < limit but sometimes the guard is missing and
      // we can get situation when init > limit. Note, for the empty loop
      // optimization zero trip guard is generated explicitly which leaves
      // only RCE predicate where exact limit is used and the predicate
      // will simply fail forcing recompilation.
      Node* neg_stride = phase->longcon(-stride_con);
      span = phase->transform(new AndLNode(bias, neg_stride));
    } else {
      Node *trip = phase->transform(new DivLNode(nullptr, bias, stride));
      span = phase->transform(new MulLNode(trip, stride));
    }
    // Convert back to int
    Node *span_int = phase->transform(new ConvL2INode(span));
    return new AddINode(span_int, in(Init)); // exact limit
  }

  return nullptr; // No progress
}
2918
2919 //------------------------------Identity---------------------------------------
2920 // If stride == 1 return limit node.
2921 Node* LoopLimitNode::Identity(PhaseGVN* phase) {
2922 int stride_con = phase->type(in(Stride))->is_int()->get_con();
2923 if (stride_con == 1 || stride_con == -1)
2924 return in(Limit);
2925 return this;
2926 }
2927
2928 //=============================================================================
2929 //----------------------match_incr_with_optional_truncation--------------------
2930 // Match increment with optional truncation:
2931 // CHAR: (i+1)&0x7fff, BYTE: ((i+1)<<8)>>8, or SHORT: ((i+1)<<16)>>16
2932 // Return null for failure. Success returns the increment node.
2933 Node* CountedLoopNode::match_incr_with_optional_truncation(Node* expr, Node** trunc1, Node** trunc2,
2934 const TypeInteger** trunc_type,
2935 BasicType bt) {
2936 // Quick cutouts:
2937 if (expr == nullptr || expr->req() != 3) return nullptr;
2938
2939 Node *t1 = nullptr;
2940 Node *t2 = nullptr;
2941 Node* n1 = expr;
2942 int n1op = n1->Opcode();
2943 const TypeInteger* trunc_t = TypeInteger::bottom(bt);
2944
2945 if (bt == T_INT) {
2946 // Try to strip (n1 & M) or (n1 << N >> N) from n1.
2947 if (n1op == Op_AndI &&
2948 n1->in(2)->is_Con() &&
2949 n1->in(2)->bottom_type()->is_int()->get_con() == 0x7fff) {
2950 // %%% This check should match any mask of 2**K-1.
2951 t1 = n1;
2952 n1 = t1->in(1);
2953 n1op = n1->Opcode();
2954 trunc_t = TypeInt::CHAR;
2955 } else if (n1op == Op_RShiftI &&
2956 n1->in(1) != nullptr &&
2957 n1->in(1)->Opcode() == Op_LShiftI &&
2958 n1->in(2) == n1->in(1)->in(2) &&
2959 n1->in(2)->is_Con()) {
2960 jint shift = n1->in(2)->bottom_type()->is_int()->get_con();
2961 // %%% This check should match any shift in [1..31].
2962 if (shift == 16 || shift == 8) {
2963 t1 = n1;
2964 t2 = t1->in(1);
2965 n1 = t2->in(1);
2966 n1op = n1->Opcode();
2967 if (shift == 16) {
2968 trunc_t = TypeInt::SHORT;
2969 } else if (shift == 8) {
2970 trunc_t = TypeInt::BYTE;
2971 }
2972 }
2973 }
2974 }
2975
2976 // If (maybe after stripping) it is an AddI, we won:
2977 if (n1op == Op_Add(bt)) {
2978 *trunc1 = t1;
2979 *trunc2 = t2;
2980 *trunc_type = trunc_t;
2981 return n1;
2982 }
2983
2984 // failed
2985 return nullptr;
2986 }
2987
2988 IfNode* CountedLoopNode::find_multiversion_if_from_multiversion_fast_main_loop() {
2989 assert(is_main_loop() && is_multiversion_fast_loop(), "must be multiversion fast main loop");
2990 CountedLoopEndNode* pre_end = find_pre_loop_end();
2991 if (pre_end == nullptr) { return nullptr; }
2992 Node* pre_entry = pre_end->loopnode()->in(LoopNode::EntryControl);
2993 const Predicates predicates(pre_entry);
2994 IfTrueNode* before_predicates = predicates.entry()->isa_IfTrue();
2995 if (before_predicates != nullptr &&
2996 before_predicates->in(0)->in(1)->is_OpaqueMultiversioning()) {
2997 return before_predicates->in(0)->as_If();
2998 }
2999 return nullptr;
3000 }
3001
3002 LoopNode* CountedLoopNode::skip_strip_mined(int expect_skeleton) {
3003 if (is_strip_mined() && in(EntryControl) != nullptr && in(EntryControl)->is_OuterStripMinedLoop()) {
3004 verify_strip_mined(expect_skeleton);
3005 return in(EntryControl)->as_Loop();
3006 }
3007 return this;
3008 }
3009
3010 OuterStripMinedLoopNode* CountedLoopNode::outer_loop() const {
3011 assert(is_strip_mined(), "not a strip mined loop");
3012 Node* c = in(EntryControl);
3013 if (c == nullptr || c->is_top() || !c->is_OuterStripMinedLoop()) {
3014 return nullptr;
3015 }
3016 return c->as_OuterStripMinedLoop();
3017 }
3018
3019 IfTrueNode* OuterStripMinedLoopNode::outer_loop_tail() const {
3020 Node* c = in(LoopBackControl);
3021 if (c == nullptr || c->is_top()) {
3022 return nullptr;
3023 }
3024 return c->as_IfTrue();
3025 }
3026
3027 IfTrueNode* CountedLoopNode::outer_loop_tail() const {
3028 LoopNode* l = outer_loop();
3029 if (l == nullptr) {
3030 return nullptr;
3031 }
3032 return l->outer_loop_tail();
3033 }
3034
3035 OuterStripMinedLoopEndNode* OuterStripMinedLoopNode::outer_loop_end() const {
3036 IfTrueNode* proj = outer_loop_tail();
3037 if (proj == nullptr) {
3038 return nullptr;
3039 }
3040 Node* c = proj->in(0);
3041 if (c == nullptr || c->is_top() || c->outcnt() != 2) {
3042 return nullptr;
3043 }
3044 return c->as_OuterStripMinedLoopEnd();
3045 }
3046
3047 OuterStripMinedLoopEndNode* CountedLoopNode::outer_loop_end() const {
3048 LoopNode* l = outer_loop();
3049 if (l == nullptr) {
3050 return nullptr;
3051 }
3052 return l->outer_loop_end();
3053 }
3054
3055 IfFalseNode* OuterStripMinedLoopNode::outer_loop_exit() const {
3056 IfNode* le = outer_loop_end();
3057 if (le == nullptr) {
3058 return nullptr;
3059 }
3060 Node* c = le->proj_out_or_null(false);
3061 if (c == nullptr) {
3062 return nullptr;
3063 }
3064 return c->as_IfFalse();
3065 }
3066
3067 IfFalseNode* CountedLoopNode::outer_loop_exit() const {
3068 LoopNode* l = outer_loop();
3069 if (l == nullptr) {
3070 return nullptr;
3071 }
3072 return l->outer_loop_exit();
3073 }
3074
3075 SafePointNode* OuterStripMinedLoopNode::outer_safepoint() const {
3076 IfNode* le = outer_loop_end();
3077 if (le == nullptr) {
3078 return nullptr;
3079 }
3080 Node* c = le->in(0);
3081 if (c == nullptr || c->is_top()) {
3082 return nullptr;
3083 }
3084 assert(c->Opcode() == Op_SafePoint, "broken outer loop");
3085 return c->as_SafePoint();
3086 }
3087
3088 SafePointNode* CountedLoopNode::outer_safepoint() const {
3089 LoopNode* l = outer_loop();
3090 if (l == nullptr) {
3091 return nullptr;
3092 }
3093 return l->outer_safepoint();
3094 }
3095
3096 Node* CountedLoopNode::skip_assertion_predicates_with_halt() {
3097 Node* ctrl = in(LoopNode::EntryControl);
3098 if (ctrl == nullptr) {
3099 // Dying loop.
3100 return nullptr;
3101 }
3102 if (is_main_loop()) {
3103 ctrl = skip_strip_mined()->in(LoopNode::EntryControl);
3104 }
3105 if (is_main_loop() || is_post_loop()) {
3106 AssertionPredicates assertion_predicates(ctrl);
3107 return assertion_predicates.entry();
3108 }
3109 return ctrl;
3110 }
3111
3112
3113 int CountedLoopNode::stride_con() const {
3114 CountedLoopEndNode* cle = loopexit_or_null();
3115 return cle != nullptr ? cle->stride_con() : 0;
3116 }
3117
3118 BaseCountedLoopNode* BaseCountedLoopNode::make(Node* entry, Node* backedge, BasicType bt) {
3119 if (bt == T_INT) {
3120 return new CountedLoopNode(entry, backedge);
3121 }
3122 assert(bt == T_LONG, "unsupported");
3123 return new LongCountedLoopNode(entry, backedge);
3124 }
3125
// When the strip mined construct is turned back into a plain counted loop, Stores that were sunk out
// of the inner loop still hang off the inner loop's exit projection. Repair the memory graph for each
// such chain of sunk Stores: either rewire the existing memory Phi's backedge to the last Store of the
// chain, or create a fresh Phi when the entire chain for a memory slice was sunk.
// 'iloop' may be nullptr (pure IGVN mode); helpers fall back to igvn-only registration then.
void OuterStripMinedLoopNode::fix_sunk_stores_when_back_to_counted_loop(PhaseIterGVN* igvn,
                                                                        PhaseIdealLoop* iloop) const {
  CountedLoopNode* inner_cl = inner_counted_loop();
  IfFalseNode* cle_out = inner_loop_exit();

  if (cle_out->outcnt() > 1) {
    // Look for chains of stores that were sunk
    // out of the inner loop and are in the outer loop
    for (DUIterator_Fast imax, i = cle_out->fast_outs(imax); i < imax; i++) {
      Node* u = cle_out->fast_out(i);
      if (u->is_Store()) {
        int alias_idx = igvn->C->get_alias_index(u->adr_type());
        // Walk up the memory edges to find the first (oldest) Store of the chain pinned at cle_out.
        Node* first = u;
        for (;;) {
          Node* next = first->in(MemNode::Memory);
          if (!next->is_Store() || next->in(0) != cle_out) {
            break;
          }
          assert(igvn->C->get_alias_index(next->adr_type()) == alias_idx, "");
          first = next;
        }
        // Walk down the memory uses to find the last (newest) Store of the chain pinned at cle_out.
        Node* last = u;
        for (;;) {
          Node* next = nullptr;
          for (DUIterator_Fast jmax, j = last->fast_outs(jmax); j < jmax; j++) {
            Node* uu = last->fast_out(j);
            if (uu->is_Store() && uu->in(0) == cle_out) {
              assert(next == nullptr, "only one in the outer loop");
              next = uu;
              assert(igvn->C->get_alias_index(next->adr_type()) == alias_idx, "");
            }
          }
          if (next == nullptr) {
            break;
          }
          last = next;
        }
        // Find the inner loop memory Phi (if any) whose backedge refers to this chain of sunk Stores.
        Node* phi = nullptr;
        for (DUIterator_Fast jmax, j = inner_cl->fast_outs(jmax); j < jmax; j++) {
          Node* uu = inner_cl->fast_out(j);
          if (uu->is_Phi()) {
            Node* be = uu->in(LoopNode::LoopBackControl);
            if (be->is_Store() && be->in(0) == inner_cl->in(LoopNode::LoopBackControl)) {
              assert(igvn->C->get_alias_index(uu->adr_type()) != alias_idx && igvn->C->get_alias_index(uu->adr_type()) != Compile::AliasIdxBot, "unexpected store");
            }
            if (be == last || be == first->in(MemNode::Memory)) {
              assert(igvn->C->get_alias_index(uu->adr_type()) == alias_idx || igvn->C->get_alias_index(uu->adr_type()) == Compile::AliasIdxBot, "unexpected alias");
              assert(phi == nullptr, "only one phi");
              phi = uu;
            }
          }
        }
#ifdef ASSERT
        // Sanity: no other memory Phi on the inner loop may belong to this memory slice. For the
        // bottom-memory Phi, walk the memory graph up from its backedge and check the walk comes
        // back to the Phi itself (i.e. the memory graph for this slice is well formed).
        for (DUIterator_Fast jmax, j = inner_cl->fast_outs(jmax); j < jmax; j++) {
          Node* uu = inner_cl->fast_out(j);
          if (uu->is_memory_phi()) {
            if (uu->adr_type() == igvn->C->get_adr_type(igvn->C->get_alias_index(u->adr_type()))) {
              assert(phi == uu, "what's that phi?");
            } else if (uu->adr_type() == TypePtr::BOTTOM) {
              Node* n = uu->in(LoopNode::LoopBackControl);
              uint limit = igvn->C->live_nodes();
              uint i = 0;
              while (n != uu) {
                i++;
                assert(i < limit, "infinite loop");
                if (n->is_Proj()) {
                  n = n->in(0);
                } else if (n->is_SafePoint() || n->is_MemBar()) {
                  n = n->in(TypeFunc::Memory);
                } else if (n->is_Phi()) {
                  n = n->in(1);
                } else if (n->is_MergeMem()) {
                  n = n->as_MergeMem()->memory_at(igvn->C->get_alias_index(u->adr_type()));
                } else if (n->is_Store() || n->is_LoadStore() || n->is_ClearArray()) {
                  n = n->in(MemNode::Memory);
                } else {
                  n->dump();
                  ShouldNotReachHere();
                }
              }
            }
          }
        }
#endif
        if (phi == nullptr) {
          // If an entire chain was sunk, the
          // inner loop has no phi for that memory
          // slice, create one for the outer loop
          phi = PhiNode::make(inner_cl, first->in(MemNode::Memory), Type::MEMORY,
                              igvn->C->get_adr_type(igvn->C->get_alias_index(u->adr_type())));
          phi->set_req(LoopNode::LoopBackControl, last);
          phi = register_new_node(phi, inner_cl, igvn, iloop);
          igvn->replace_input_of(first, MemNode::Memory, phi);
        } else {
          // Or fix the existing Phi to include
          // that chain of stores.
          Node* be = phi->in(LoopNode::LoopBackControl);
          assert(!(be->is_Store() && be->in(0) == inner_cl->in(LoopNode::LoopBackControl)), "store on the backedge + sunk stores: unsupported");
          if (be == first->in(MemNode::Memory)) {
            if (be == phi->in(LoopNode::LoopBackControl)) {
              igvn->replace_input_of(phi, LoopNode::LoopBackControl, last);
            } else {
              igvn->replace_input_of(be, MemNode::Memory, last);
            }
          } else {
            // Already wired to the last Store of the chain; nothing to do (verified below).
#ifdef ASSERT
            if (be == phi->in(LoopNode::LoopBackControl)) {
              assert(phi->in(LoopNode::LoopBackControl) == last, "");
            } else {
              assert(be->in(MemNode::Memory) == last, "");
            }
#endif
          }
        }
      }
    }
  }
}
3244
3245 // The outer strip mined loop is initially only partially constructed. In particular Phis are omitted.
3246 // See comment above: PhaseIdealLoop::create_outer_strip_mined_loop()
3247 // We're now in the process of finishing the construction of the outer loop. For each Phi in the inner loop, a Phi in
3248 // the outer loop was just now created. However, Sunk Stores cause an extra challenge:
3249 // 1) If all Stores in the inner loop were sunk for a particular memory slice, there's no Phi left for that memory slice
3250 // in the inner loop anymore, and hence we did not yet add a Phi for the outer loop. So an extra Phi must now be
3251 // added for each chain of sunk Stores for a particular memory slice.
3252 // 2) If some Stores were sunk and some left in the inner loop, a Phi was already created in the outer loop but
3253 // its backedge input wasn't wired correctly to the last Store of the chain: the backedge input was set to the
3254 // backedge of the inner loop Phi instead, but it needs to be the last Store of the chain in the outer loop. We now
3255 // have to fix that too.
// Finish wiring outer loop memory Phis in the presence of sunk Stores (see the comment block above
// for the two cases handled: a missing Phi for a fully-sunk chain, and a Phi whose backedge must be
// redirected to the last Store of a partially-sunk chain).
void OuterStripMinedLoopNode::handle_sunk_stores_when_finishing_construction(PhaseIterGVN* igvn) {
  IfFalseNode* cle_exit_proj = inner_loop_exit();

  // Find Sunk stores: Sunk stores are pinned on the loop exit projection of the inner loop. Indeed, because Sunk Stores
  // modify the memory state captured by the SafePoint in the outer strip mined loop, they must be above it. The
  // SafePoint's control input is the loop exit projection. It's also the only control out of the inner loop above the
  // SafePoint.
#ifdef ASSERT
  // Count all sunk Stores pinned at the exit projection so we can cross-check the number reached
  // through the SafePoint's MergeMem below.
  int stores_in_outer_loop_cnt = 0;
  for (DUIterator_Fast imax, i = cle_exit_proj->fast_outs(imax); i < imax; i++) {
    Node* u = cle_exit_proj->fast_out(i);
    if (u->is_Store()) {
      stores_in_outer_loop_cnt++;
    }
  }
#endif

  // Sunk stores are reachable from the memory state of the outer loop safepoint
  SafePointNode* safepoint = outer_safepoint();
  MergeMemNode* mm = safepoint->in(TypeFunc::Memory)->isa_MergeMem();
  if (mm == nullptr) {
    // There is no MergeMem, which should only happen if there was no memory node
    // sunk out of the loop.
    assert(stores_in_outer_loop_cnt == 0, "inconsistent");
    return;
  }
  DEBUG_ONLY(int stores_in_outer_loop_cnt2 = 0);
  // Examine every non-empty memory slice captured by the SafePoint's MergeMem.
  for (MergeMemStream mms(mm); mms.next_non_empty();) {
    Node* mem = mms.memory();
    // Traverse up the chain of stores to find the first store pinned
    // at the loop exit projection.
    Node* last = mem;
    Node* first = nullptr;
    while (mem->is_Store() && mem->in(0) == cle_exit_proj) {
      DEBUG_ONLY(stores_in_outer_loop_cnt2++);
      first = mem;
      mem = mem->in(MemNode::Memory);
    }
    if (first != nullptr) {
      // Found a chain of Stores that were sunk
      // Do we already have a memory Phi for that slice on the outer loop? If that is the case, that Phi was created
      // by cloning an inner loop Phi. The inner loop Phi should have mem, the memory state of the first Store out of
      // the inner loop, as input on the backedge. So does the outer loop Phi given it's a clone.
      Node* phi = nullptr;
      for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
        Node* u = mem->fast_out(i);
        if (u->is_Phi() && u->in(0) == this && u->in(LoopBackControl) == mem) {
          assert(phi == nullptr, "there should be only one");
          phi = u;
          PRODUCT_ONLY(break); // keep scanning in debug builds to verify uniqueness
        }
      }
      if (phi == nullptr) {
        // No outer loop Phi? create one
        phi = PhiNode::make(this, last);
        phi->set_req(EntryControl, mem);
        phi = igvn->transform(phi);
        igvn->replace_input_of(first, MemNode::Memory, phi);
      } else {
        // Fix memory state along the backedge: it should be the last sunk Store of the chain
        igvn->replace_input_of(phi, LoopBackControl, last);
      }
    }
  }
  assert(stores_in_outer_loop_cnt == stores_in_outer_loop_cnt2, "inconsistent");
}
3322
// Finish construction of the strip mined loop nest once the shape of the inner loop is known:
// decide whether strip mining pays off at all (otherwise degrade to a plain counted loop, with or
// without the safepoint), clone backedge nodes and Phis for the outer loop, and set up both exit
// conditions so the inner loop runs at most LoopStripMiningIter iterations per outer iteration.
void OuterStripMinedLoopNode::adjust_strip_mined_loop(PhaseIterGVN* igvn) {
  verify_strip_mined(1);
  // Look for the outer & inner strip mined loop, reduce number of
  // iterations of the inner loop, set exit condition of outer loop,
  // construct required phi nodes for outer loop.
  CountedLoopNode* inner_cl = inner_counted_loop();
  assert(inner_cl->is_strip_mined(), "inner loop should be strip mined");
  if (LoopStripMiningIter == 0) {
    // Strip mining disabled: drop the outer loop and its safepoint entirely.
    remove_outer_loop_and_safepoint(igvn);
    return;
  }
  if (LoopStripMiningIter == 1) {
    // A single inner iteration per outer round degenerates to a counted loop with a safepoint.
    transform_to_counted_loop(igvn, nullptr);
    return;
  }
  Node* inner_iv_phi = inner_cl->phi();
  if (inner_iv_phi == nullptr) {
    // No induction variable Phi: the exit conditions cannot be adjusted. Turn the outer loop end
    // into a plain If and give up strip mining for this loop.
    IfNode* outer_le = outer_loop_end();
    Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
    igvn->replace_node(outer_le, iff);
    inner_cl->clear_strip_mined();
    return;
  }
  CountedLoopEndNode* inner_cle = inner_counted_loop_end();

  int stride = inner_cl->stride_con();
  // For a min int stride, LoopStripMiningIter * stride overflows the int range for all values of LoopStripMiningIter
  // except 0 or 1. Those values are handled early on in this method and causes the method to return. So for a min int
  // stride, the method is guaranteed to return at the next check below.
  jlong scaled_iters_long = ((jlong)LoopStripMiningIter) * ABS((jlong)stride);
  int scaled_iters = (int)scaled_iters_long;
  if ((jlong)scaled_iters != scaled_iters_long) {
    // Remove outer loop and safepoint (too few iterations)
    remove_outer_loop_and_safepoint(igvn);
    return;
  }
  jlong short_scaled_iters = LoopStripMiningIterShortLoop * ABS(stride);
  const TypeInt* inner_iv_t = igvn->type(inner_iv_phi)->is_int();
  // Upper bound on the iteration count derived from the induction variable's type range.
  jlong iter_estimate = (jlong)inner_iv_t->_hi - (jlong)inner_iv_t->_lo;
  assert(iter_estimate > 0, "broken");
  if (iter_estimate <= short_scaled_iters) {
    // Remove outer loop and safepoint: loop executes less than LoopStripMiningIterShortLoop
    remove_outer_loop_and_safepoint(igvn);
    return;
  }
  if (iter_estimate <= scaled_iters_long) {
    // We would only go through one iteration of
    // the outer loop: drop the outer loop but
    // keep the safepoint so we don't run for
    // too long without a safepoint
    IfNode* outer_le = outer_loop_end();
    Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
    igvn->replace_node(outer_le, iff);
    inner_cl->clear_strip_mined();
    return;
  }

  Node* cle_tail = inner_cle->proj_out(true);
  ResourceMark rm;
  Node_List old_new;
  if (cle_tail->outcnt() > 1) {
    // Look for nodes on backedge of inner loop and clone them
    Unique_Node_List backedge_nodes;
    for (DUIterator_Fast imax, i = cle_tail->fast_outs(imax); i < imax; i++) {
      Node* u = cle_tail->fast_out(i);
      if (u != inner_cl) {
        assert(!u->is_CFG(), "control flow on the backedge?");
        backedge_nodes.push(u);
      }
    }
    // Transitively collect data nodes hanging off the backedge; stop at inner loop Phis.
    uint last = igvn->C->unique();
    for (uint next = 0; next < backedge_nodes.size(); next++) {
      Node* n = backedge_nodes.at(next);
      old_new.map(n->_idx, n->clone());
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* u = n->fast_out(i);
        assert(!u->is_CFG(), "broken");
        if (u->_idx >= last) {
          // Node created by this pass (a clone): do not revisit.
          continue;
        }
        if (!u->is_Phi()) {
          backedge_nodes.push(u);
        } else {
          assert(u->in(0) == inner_cl, "strange phi on the backedge");
        }
      }
    }
    // Put the clones on the outer loop backedge
    Node* le_tail = outer_loop_tail();
    for (uint next = 0; next < backedge_nodes.size(); next++) {
      Node *n = old_new[backedge_nodes.at(next)->_idx];
      // Rewire the clone's inputs to fellow clones where one exists.
      for (uint i = 1; i < n->req(); i++) {
        if (n->in(i) != nullptr && old_new[n->in(i)->_idx] != nullptr) {
          n->set_req(i, old_new[n->in(i)->_idx]);
        }
      }
      if (n->in(0) != nullptr && n->in(0) == cle_tail) {
        n->set_req(0, le_tail);
      }
      igvn->register_new_node_with_optimizer(n);
    }
  }

  Node* iv_phi = nullptr;
  // Make a clone of each phi in the inner loop for the outer loop
  // When Stores were Sunk, after this step, a Phi may still be missing or its backedge incorrectly wired. See
  // handle_sunk_stores_when_finishing_construction()
  for (uint i = 0; i < inner_cl->outcnt(); i++) {
    Node* u = inner_cl->raw_out(i);
    if (u->is_Phi()) {
      assert(u->in(0) == inner_cl, "inconsistent");
      Node* phi = u->clone();
      phi->set_req(0, this);
      Node* be = old_new[phi->in(LoopNode::LoopBackControl)->_idx];
      if (be != nullptr) {
        phi->set_req(LoopNode::LoopBackControl, be);
      }
      phi = igvn->transform(phi);
      igvn->replace_input_of(u, LoopNode::EntryControl, phi);
      if (u == inner_iv_phi) {
        iv_phi = phi;
      }
    }
  }

  handle_sunk_stores_when_finishing_construction(igvn);

  if (iv_phi != nullptr) {
    // Now adjust the inner loop's exit condition
    Node* limit = inner_cl->limit();
    // If limit < init for stride > 0 (or limit > init for stride < 0),
    // the loop body is run only once. Given limit - init (init - limit resp.)
    // would be negative, the unsigned comparison below would cause
    // the loop body to be run for LoopStripMiningIter.
    Node* max = nullptr;
    if (stride > 0) {
      max = MaxNode::max_diff_with_zero(limit, iv_phi, TypeInt::INT, *igvn);
    } else {
      max = MaxNode::max_diff_with_zero(iv_phi, limit, TypeInt::INT, *igvn);
    }
    // sub is positive and can be larger than the max signed int
    // value. Use an unsigned min.
    Node* const_iters = igvn->intcon(scaled_iters);
    Node* min = MaxNode::unsigned_min(max, const_iters, TypeInt::make(0, scaled_iters, Type::WidenMin), *igvn);
    // min is the number of iterations for the next inner loop execution:
    // unsigned_min(max(limit - iv_phi, 0), scaled_iters) if stride > 0
    // unsigned_min(max(iv_phi - limit, 0), scaled_iters) if stride < 0

    Node* new_limit = nullptr;
    if (stride > 0) {
      new_limit = igvn->transform(new AddINode(min, iv_phi));
    } else {
      new_limit = igvn->transform(new SubINode(iv_phi, min));
    }
    Node* inner_cmp = inner_cle->cmp_node();
    Node* inner_bol = inner_cle->in(CountedLoopEndNode::TestValue);
    Node* outer_bol = inner_bol;
    // cmp node for inner loop may be shared
    inner_cmp = inner_cmp->clone();
    inner_cmp->set_req(2, new_limit);
    inner_bol = inner_bol->clone();
    inner_bol->set_req(1, igvn->transform(inner_cmp));
    igvn->replace_input_of(inner_cle, CountedLoopEndNode::TestValue, igvn->transform(inner_bol));
    // Set the outer loop's exit condition too
    igvn->replace_input_of(outer_loop_end(), 1, outer_bol);
  } else {
    // Should not happen given the inner_iv_phi check above; degrade gracefully in product builds.
    assert(false, "should be able to adjust outer loop");
    IfNode* outer_le = outer_loop_end();
    Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
    igvn->replace_node(outer_le, iff);
    inner_cl->clear_strip_mined();
  }
}
3496
// Collapse the strip mined nest into a single counted loop: the inner loop's exit test is moved to
// a new CountedLoopEnd that replaces the outer loop end (so the safepoint stays on the backedge),
// and the outer loop structure is dismantled. 'iloop' may be nullptr when called outside of loop
// opts (pure IGVN mode); then only IGVN bookkeeping is updated.
void OuterStripMinedLoopNode::transform_to_counted_loop(PhaseIterGVN* igvn, PhaseIdealLoop* iloop) {
  CountedLoopNode* inner_cl = unique_ctrl_out()->as_CountedLoop();
  CountedLoopEndNode* cle = inner_cl->loopexit();
  Node* inner_test = cle->in(1);
  IfNode* outer_le = outer_loop_end();
  CountedLoopEndNode* inner_cle = inner_cl->loopexit();
  Node* safepoint = outer_safepoint();

  // Repair memory graph for Stores sunk out of the inner loop before restructuring.
  fix_sunk_stores_when_back_to_counted_loop(igvn, iloop);

  // make counted loop exit test always fail
  ConINode* zero = igvn->intcon(0);
  if (iloop != nullptr) {
    iloop->set_root_as_ctrl(zero);
  }
  igvn->replace_input_of(cle, 1, zero);
  // replace outer loop end with CountedLoopEndNode carrying the former CLE's exit test
  Node* new_end = new CountedLoopEndNode(outer_le->in(0), inner_test, cle->_prob, cle->_fcnt);
  register_control(new_end, inner_cl, outer_le->in(0), igvn, iloop);
  if (iloop == nullptr) {
    igvn->replace_node(outer_le, new_end);
  } else {
    iloop->lazy_replace(outer_le, new_end);
  }
  // the backedge of the inner loop must be rewired to the new loop end
  Node* backedge = cle->proj_out(true);
  igvn->replace_input_of(backedge, 0, new_end);
  if (iloop != nullptr) {
    iloop->set_idom(backedge, new_end, iloop->dom_depth(new_end) + 1);
  }
  // make the outer loop go away
  igvn->replace_input_of(in(LoopBackControl), 0, igvn->C->top());
  igvn->replace_input_of(this, LoopBackControl, igvn->C->top());
  inner_cl->clear_strip_mined();
  if (iloop != nullptr) {
    // Move the safepoint, its control, and every data node it keeps alive in the (now dead) outer
    // loop into the inner loop's IdealLoopTree so loop bookkeeping stays consistent.
    Unique_Node_List wq;
    wq.push(safepoint);

    IdealLoopTree* outer_loop_ilt = iloop->get_loop(this);
    IdealLoopTree* loop = iloop->get_loop(inner_cl);

    for (uint i = 0; i < wq.size(); i++) {
      Node* n = wq.at(i);
      for (uint j = 0; j < n->req(); ++j) {
        Node* in = n->in(j);
        if (in == nullptr || in->is_CFG()) {
          continue;
        }
        if (iloop->get_loop(iloop->get_ctrl(in)) != outer_loop_ilt) {
          continue;
        }
        wq.push(in);
      }
      assert(!loop->_body.contains(n), "Shouldn't append node to body twice");
      loop->_body.push(n);
    }
    iloop->set_loop(safepoint, loop);
    loop->_body.push(safepoint);
    iloop->set_loop(safepoint->in(0), loop);
    loop->_body.push(safepoint->in(0));
    outer_loop_ilt->_tail = igvn->C->top();
  }
}
3560
3561 void OuterStripMinedLoopNode::remove_outer_loop_and_safepoint(PhaseIterGVN* igvn) const {
3562 CountedLoopNode* inner_cl = unique_ctrl_out()->as_CountedLoop();
3563 Node* outer_sfpt = outer_safepoint();
3564 Node* outer_out = outer_loop_exit();
3565 igvn->replace_node(outer_out, outer_sfpt->in(0));
3566 igvn->replace_input_of(outer_sfpt, 0, igvn->C->top());
3567 inner_cl->clear_strip_mined();
3568 }
3569
3570 Node* OuterStripMinedLoopNode::register_new_node(Node* node, LoopNode* ctrl, PhaseIterGVN* igvn, PhaseIdealLoop* iloop) {
3571 if (iloop == nullptr) {
3572 return igvn->transform(node);
3573 }
3574 iloop->register_new_node(node, ctrl);
3575 return node;
3576 }
3577
3578 Node* OuterStripMinedLoopNode::register_control(Node* node, Node* loop, Node* idom, PhaseIterGVN* igvn,
3579 PhaseIdealLoop* iloop) {
3580 if (iloop == nullptr) {
3581 return igvn->transform(node);
3582 }
3583 iloop->register_control(node, iloop->get_loop(loop), idom);
3584 return node;
3585 }
3586
3587 const Type* OuterStripMinedLoopEndNode::Value(PhaseGVN* phase) const {
3588 if (!in(0)) return Type::TOP;
3589 if (phase->type(in(0)) == Type::TOP)
3590 return Type::TOP;
3591
3592 // Until expansion, the loop end condition is not set so this should not constant fold.
3593 if (is_expanded(phase)) {
3594 return IfNode::Value(phase);
3595 }
3596
3597 return TypeTuple::IFBOTH;
3598 }
3599
3600 bool OuterStripMinedLoopEndNode::is_expanded(PhaseGVN *phase) const {
3601 // The outer strip mined loop head only has Phi uses after expansion
3602 if (phase->is_IterGVN()) {
3603 Node* backedge = proj_out_or_null(true);
3604 if (backedge != nullptr) {
3605 Node* head = backedge->unique_ctrl_out_or_null();
3606 if (head != nullptr && head->is_OuterStripMinedLoop()) {
3607 if (head->find_out_with(Op_Phi) != nullptr) {
3608 return true;
3609 }
3610 }
3611 }
3612 }
3613 return false;
3614 }
3615
3616 Node *OuterStripMinedLoopEndNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3617 if (remove_dead_region(phase, can_reshape)) return this;
3618
3619 return nullptr;
3620 }
3621
3622 //------------------------------filtered_type--------------------------------
3623 // Return a type based on condition control flow
3624 // A successful return will be a type that is restricted due
3625 // to a series of dominating if-tests, such as:
3626 // if (i < 10) {
3627 // if (i > 0) {
3628 // here: "i" type is [1..10)
3629 // }
3630 // }
3631 // or a control flow merge
3632 // if (i < 10) {
3633 // do {
3634 // phi( , ) -- at top of loop type is [min_int..10)
3635 // i = ?
3636 // } while ( i < 10)
3637 //
const TypeInt* PhaseIdealLoop::filtered_type( Node *n, Node* n_ctrl) {
  assert(n && n->bottom_type()->is_int(), "must be int");
  const TypeInt* filtered_t = nullptr;
  if (!n->is_Phi()) {
    // NOTE(review): with '||' this assert can only fire when n_ctrl is nullptr (nullptr never
    // equals C->top()), so the second clause is dead; '&&' may have been intended — confirm
    // before changing.
    assert(n_ctrl != nullptr || n_ctrl == C->top(), "valid control");
    filtered_t = filtered_type_from_dominators(n, n_ctrl);

  } else {
    // Phi: combine the filtered type of each input computed along that input's control path.
    Node* phi = n->as_Phi();
    Node* region = phi->in(0);
    assert(n_ctrl == nullptr || n_ctrl == region, "ctrl parameter must be region");
    if (region && region != C->top()) {
      for (uint i = 1; i < phi->req(); i++) {
        Node* val = phi->in(i);
        Node* use_c = region->in(i);
        const TypeInt* val_t = filtered_type_from_dominators(val, use_c);
        if (val_t != nullptr) {
          if (filtered_t == nullptr) {
            filtered_t = val_t;
          } else {
            // Meet: the value may arrive along any path, so widen to cover all of them.
            filtered_t = filtered_t->meet(val_t)->is_int();
          }
        }
      }
    }
  }
  // Intersect (join) what control flow proves with what IGVN's type lattice already knows.
  const TypeInt* n_t = _igvn.type(n)->is_int();
  if (filtered_t != nullptr) {
    n_t = n_t->join(filtered_t)->is_int();
  }
  return n_t;
}
3670
3671
3672 //------------------------------filtered_type_from_dominators--------------------------------
3673 // Return a possibly more restrictive type for val based on condition control flow of dominators
3674 const TypeInt* PhaseIdealLoop::filtered_type_from_dominators( Node* val, Node *use_ctrl) {
3675 if (val->is_Con()) {
3676 return val->bottom_type()->is_int();
3677 }
3678 uint if_limit = 10; // Max number of dominating if's visited
3679 const TypeInt* rtn_t = nullptr;
3680
3681 if (use_ctrl && use_ctrl != C->top()) {
3682 Node* val_ctrl = get_ctrl(val);
3683 uint val_dom_depth = dom_depth(val_ctrl);
3684 Node* pred = use_ctrl;
3685 uint if_cnt = 0;
3686 while (if_cnt < if_limit) {
3687 if ((pred->Opcode() == Op_IfTrue || pred->Opcode() == Op_IfFalse)) {
3688 if_cnt++;
3689 const TypeInt* if_t = IfNode::filtered_int_type(&_igvn, val, pred);
3690 if (if_t != nullptr) {
3691 if (rtn_t == nullptr) {
3692 rtn_t = if_t;
3693 } else {
3694 rtn_t = rtn_t->join(if_t)->is_int();
3695 }
3696 }
3697 }
3698 pred = idom(pred);
3699 if (pred == nullptr || pred == C->top()) {
3700 break;
3701 }
3702 // Stop if going beyond definition block of val
3703 if (dom_depth(pred) < val_dom_depth) {
3704 break;
3705 }
3706 }
3707 }
3708 return rtn_t;
3709 }
3710
3711
3712 //------------------------------dump_spec--------------------------------------
3713 // Dump special per-node info
3714 #ifndef PRODUCT
3715 void CountedLoopEndNode::dump_spec(outputStream *st) const {
3716 if( in(TestValue) != nullptr && in(TestValue)->is_Bool() ) {
3717 BoolTest bt( test_trip()); // Added this for g++.
3718
3719 st->print("[");
3720 bt.dump_on(st);
3721 st->print("]");
3722 }
3723 st->print(" ");
3724 IfNode::dump_spec(st);
3725 }
3726 #endif
3727
3728 //=============================================================================
3729 //------------------------------is_member--------------------------------------
3730 // Is 'l' a member of 'this'?
3731 bool IdealLoopTree::is_member(const IdealLoopTree *l) const {
3732 while( l->_nest > _nest ) l = l->_parent;
3733 return l == this;
3734 }
3735
3736 //------------------------------set_nest---------------------------------------
3737 // Set loop tree nesting depth. Accumulate _has_call bits.
3738 int IdealLoopTree::set_nest( uint depth ) {
3739 assert(depth <= SHRT_MAX, "sanity");
3740 _nest = depth;
3741 int bits = _has_call;
3742 if( _child ) bits |= _child->set_nest(depth+1);
3743 if( bits ) _has_call = 1;
3744 if( _next ) bits |= _next ->set_nest(depth );
3745 return bits;
3746 }
3747
3748 //------------------------------split_fall_in----------------------------------
3749 // Split out multiple fall-in edges from the loop header. Move them to a
3750 // private RegionNode before the loop. This becomes the loop landing pad.
// Split out multiple fall-in edges from the loop header into a private landing pad
// RegionNode placed before the loop, splitting the header's Phis accordingly.
// 'fall_in_cnt' is the number of non-loop (fall-in) control inputs of _head.
void IdealLoopTree::split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt ) {
  PhaseIterGVN &igvn = phase->_igvn;
  uint i;

  // Make a new RegionNode to be the landing pad.
  RegionNode* landing_pad = new RegionNode(fall_in_cnt + 1);
  phase->set_loop(landing_pad,_parent);
  // If _head was irreducible loop entry, landing_pad may now be too
  landing_pad->set_loop_status(_head->as_Region()->loop_status());
  // Gather all the fall-in control paths into the landing pad
  uint icnt = fall_in_cnt;
  uint oreq = _head->req();
  // Walk inputs in reverse so 'icnt' fills the landing pad from the top down.
  for( i = oreq-1; i>0; i-- )
    if( !phase->is_member( this, _head->in(i) ) )
      landing_pad->set_req(icnt--,_head->in(i));

  // Peel off PhiNode edges as well
  for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) {
    Node *oj = _head->fast_out(j);
    if( oj->is_Phi() ) {
      PhiNode* old_phi = oj->as_Phi();
      assert( old_phi->region() == _head, "" );
      igvn.hash_delete(old_phi); // Yank from hash before hacking edges
      // Build a new Phi on the landing pad, merging only the fall-in values.
      Node *p = PhiNode::make_blank(landing_pad, old_phi);
      uint icnt = fall_in_cnt;
      for( i = oreq-1; i>0; i-- ) {
        if( !phase->is_member( this, _head->in(i) ) ) {
          p->init_req(icnt--, old_phi->in(i));
          // Go ahead and clean out old edges from old phi
          old_phi->del_req(i);
        }
      }
      // Search for CSE's here, because ZKM.jar does a lot of
      // loop hackery and we need to be a little incremental
      // with the CSE to avoid O(N^2) node blow-up.
      Node *p2 = igvn.hash_find_insert(p); // Look for a CSE
      if( p2 ) { // Found CSE
        p->destruct(&igvn); // Recover useless new node
        p = p2; // Use old node
      } else {
        igvn.register_new_node_with_optimizer(p, old_phi);
      }
      // Make old Phi refer to new Phi.
      old_phi->add_req(p);
      // Check for the special case of making the old phi useless and
      // disappear it. In JavaGrande I have a case where this useless
      // Phi is the loop limit and prevents recognizing a CountedLoop
      // which in turn prevents removing an empty loop.
      Node *id_old_phi = old_phi->Identity(&igvn);
      if( id_old_phi != old_phi ) { // Found a simple identity?
        // Note that I cannot call 'replace_node' here, because
        // that will yank the edge from old_phi to the Region and
        // I'm mid-iteration over the Region's uses.
        for (DUIterator_Last imin, i = old_phi->last_outs(imin); i >= imin; ) {
          Node* use = old_phi->last_out(i);
          igvn.rehash_node_delayed(use);
          uint uses_found = 0;
          for (uint j = 0; j < use->len(); j++) {
            if (use->in(j) == old_phi) {
              if (j < use->req()) use->set_req (j, id_old_phi);
              else use->set_prec(j, id_old_phi);
              uses_found++;
            }
          }
          i -= uses_found; // we deleted 1 or more copies of this edge
        }
      }
      igvn._worklist.push(old_phi);
    }
  }
  // Finally clean out the fall-in edges from the RegionNode
  for( i = oreq-1; i>0; i-- ) {
    if( !phase->is_member( this, _head->in(i) ) ) {
      _head->del_req(i);
    }
  }
  igvn.rehash_node_delayed(_head);
  // Transform landing pad
  igvn.register_new_node_with_optimizer(landing_pad, _head);
  // Insert landing pad into the header
  _head->add_req(landing_pad);
}
3833
3834 //------------------------------split_outer_loop-------------------------------
3835 // Split out the outermost loop from this shared header.
// Split out the outermost loop from this shared header: insert a new LoopNode
// between the entry control and _head, taking over the outermost backedge, and
// split each header Phi into an outer Phi feeding the inner one.
void IdealLoopTree::split_outer_loop( PhaseIdealLoop *phase ) {
  PhaseIterGVN &igvn = phase->_igvn;

  // Find index of outermost loop; it should also be my tail.
  uint outer_idx = 1;
  while( _head->in(outer_idx) != _tail ) outer_idx++;

  // Make a LoopNode for the outermost loop.
  Node *ctl = _head->in(LoopNode::EntryControl);
  Node *outer = new LoopNode( ctl, _head->in(outer_idx) );
  outer = igvn.register_new_node_with_optimizer(outer, _head);
  phase->set_created_loop_node();

  // Outermost loop falls into '_head' loop
  _head->set_req(LoopNode::EntryControl, outer);
  _head->del_req(outer_idx);
  // Split all the Phis up between '_head' loop and 'outer' loop.
  for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) {
    Node *out = _head->fast_out(j);
    if( out->is_Phi() ) {
      PhiNode *old_phi = out->as_Phi();
      assert( old_phi->region() == _head, "" );
      // Outer Phi merges the original entry value with the outermost backedge value.
      Node *phi = PhiNode::make_blank(outer, old_phi);
      phi->init_req(LoopNode::EntryControl, old_phi->in(LoopNode::EntryControl));
      phi->init_req(LoopNode::LoopBackControl, old_phi->in(outer_idx));
      phi = igvn.register_new_node_with_optimizer(phi, old_phi);
      // Make old Phi point to new Phi on the fall-in path
      igvn.replace_input_of(old_phi, LoopNode::EntryControl, phi);
      old_phi->del_req(outer_idx);
    }
  }

  // Use the new loop head instead of the old shared one
  _head = outer;
  phase->set_loop(_head, this);
}
3872
3873 //------------------------------fix_parent-------------------------------------
3874 static void fix_parent( IdealLoopTree *loop, IdealLoopTree *parent ) {
3875 loop->_parent = parent;
3876 if( loop->_child ) fix_parent( loop->_child, loop );
3877 if( loop->_next ) fix_parent( loop->_next , parent );
3878 }
3879
//------------------------------estimate_path_freq-----------------------------
// Estimate the execution frequency of the control path ending at 'n'.
// Walks backwards (bounded to 50 steps) through safepoints, call fall-through
// projections and never-taken branches, using If counts/probabilities or
// call-site profile data when available. Returns 0.0f when no estimate can
// be made (e.g., on an exception path or when profile data is immature).
static float estimate_path_freq( Node *n ) {
  // Try to extract some path frequency info
  IfNode *iff;
  for( int i = 0; i < 50; i++ ) { // Skip through a bunch of uncommon tests
    uint nop = n->Opcode();
    if( nop == Op_SafePoint ) { // Skip any safepoint
      n = n->in(0);
      continue;
    }
    if( nop == Op_CatchProj ) { // Get count from a prior call
      // Assume call does not always throw exceptions: means the call-site
      // count is also the frequency of the fall-through path.
      assert( n->is_CatchProj(), "" );
      if( ((CatchProjNode*)n)->_con != CatchProjNode::fall_through_index )
        return 0.0f; // Assume call exception path is rare
      // CatchProj -> Catch -> Proj -> Call
      Node *call = n->in(0)->in(0)->in(0);
      assert( call->is_Call(), "expect a call here" );
      const JVMState *jvms = ((CallNode*)call)->jvms();
      ciMethodData* methodData = jvms->method()->method_data();
      if (!methodData->is_mature()) return 0.0f; // No call-site data
      ciProfileData* data = methodData->bci_to_data(jvms->bci());
      if ((data == nullptr) || !data->is_CounterData()) {
        // no call profile available, try call's control input
        n = n->in(0);
        continue;
      }
      return data->as_CounterData()->count()/FreqCountInvocations;
    }
    // See if there's a gating IF test
    Node *n_c = n->in(0);
    if( !n_c->is_If() ) break; // No estimate available
    iff = n_c->as_If();
    if( iff->_fcnt != COUNT_UNKNOWN ) // Have a valid count?
      // Compute how much count comes on this path
      return ((nop == Op_IfTrue) ? iff->_prob : 1.0f - iff->_prob) * iff->_fcnt;
    // Have no count info. Skip dull uncommon-trap like branches.
    if( (nop == Op_IfTrue && iff->_prob < PROB_LIKELY_MAG(5)) ||
        (nop == Op_IfFalse && iff->_prob > PROB_UNLIKELY_MAG(5)) )
      break;
    // Skip through never-taken branch; look for a real loop exit.
    n = iff->in(0);
  }
  return 0.0f; // No estimate available
}
3925
//------------------------------merge_many_backedges---------------------------
// Merge all the backedges from the shared header into a private Region.
// Feed that region as the one backedge to this loop. If one backedge is
// clearly hotter than the rest (at least 2x the second-hottest), it is left
// as a separate edge so split_outer_loop() can peel an inner loop around it.
void IdealLoopTree::merge_many_backedges( PhaseIdealLoop *phase ) {
  uint i;

  // Scan for the top 2 hottest backedges
  float hotcnt = 0.0f;
  float warmcnt = 0.0f;
  uint hot_idx = 0;
  // Loop starts at 2 because slot 1 is the fall-in path
  for( i = 2; i < _head->req(); i++ ) {
    float cnt = estimate_path_freq(_head->in(i));
    if( cnt > hotcnt ) { // Grab hottest path
      warmcnt = hotcnt;
      hotcnt = cnt;
      hot_idx = i;
    } else if( cnt > warmcnt ) { // And 2nd hottest path
      warmcnt = cnt;
    }
  }

  // See if the hottest backedge is worthy of being an inner loop
  // by being much hotter than the next hottest backedge.
  if( hotcnt <= 0.0001 ||
      hotcnt < 2.0*warmcnt ) hot_idx = 0;// No hot backedge

  // Peel out the backedges into a private merge point; peel
  // them all except optionally hot_idx.
  PhaseIterGVN &igvn = phase->_igvn;

  Node *hot_tail = nullptr;
  // Make a Region for the merge point
  Node *r = new RegionNode(1);
  for( i = 2; i < _head->req(); i++ ) {
    if( i != hot_idx )
      r->add_req( _head->in(i) );
    else hot_tail = _head->in(i);
  }
  igvn.register_new_node_with_optimizer(r, _head);
  // Plug region into end of loop _head, followed by hot_tail
  while( _head->req() > 3 ) _head->del_req( _head->req()-1 );
  igvn.replace_input_of(_head, 2, r);
  if( hot_idx ) _head->add_req(hot_tail);

  // Split all the Phis up between '_head' loop and the Region 'r'
  for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) {
    Node *out = _head->fast_out(j);
    if( out->is_Phi() ) {
      PhiNode* n = out->as_Phi();
      igvn.hash_delete(n); // Delete from hash before hacking edges
      Node *hot_phi = nullptr;
      Node *phi = new PhiNode(r, n->type(), n->adr_type());
      // Check all inputs for the ones to peel out.
      // (These locals 'j' and 'i' deliberately shadow the outer iterators;
      // they index the Phi inputs in lock-step with the Region built above.)
      uint j = 1;
      for( uint i = 2; i < n->req(); i++ ) {
        if( i != hot_idx )
          phi->set_req( j++, n->in(i) );
        else hot_phi = n->in(i);
      }
      // Register the phi but do not transform until whole place transforms
      igvn.register_new_node_with_optimizer(phi, n);
      // Add the merge phi to the old Phi
      while( n->req() > 3 ) n->del_req( n->req()-1 );
      igvn.replace_input_of(n, 2, phi);
      if( hot_idx ) n->add_req(hot_phi);
    }
  }


  // Insert a new IdealLoopTree inserted below me. Turn it into a clone
  // of self loop tree. Turn self into a loop headed by _head and with
  // tail being the new merge point.
  IdealLoopTree *ilt = new IdealLoopTree( phase, _head, _tail );
  phase->set_loop(_tail,ilt); // Adjust tail
  _tail = r; // Self's tail is new merge point
  phase->set_loop(r,this);
  ilt->_child = _child; // New guy has my children
  _child = ilt; // Self has new guy as only child
  ilt->_parent = this; // new guy has self for parent
  ilt->_nest = _nest; // Same nesting depth (for now)

  // Starting with 'ilt', look for child loop trees using the same shared
  // header. Flatten these out; they will no longer be loops in the end.
  IdealLoopTree **pilt = &_child;
  while( ilt ) {
    if( ilt->_head == _head ) {
      // Does _head still have a backedge coming from ilt's tail?
      uint i;
      for( i = 2; i < _head->req(); i++ )
        if( _head->in(i) == ilt->_tail )
          break; // Still a loop
      if( i == _head->req() ) { // No longer a loop
        // Flatten ilt. Hang ilt's "_next" list from the end of
        // ilt's '_child' list. Move the ilt's _child up to replace ilt.
        IdealLoopTree **cp = &ilt->_child;
        while( *cp ) cp = &(*cp)->_next; // Find end of child list
        *cp = ilt->_next; // Hang next list at end of child list
        *pilt = ilt->_child; // Move child up to replace ilt
        ilt->_head = nullptr; // Flag as a loop UNIONED into parent
        ilt = ilt->_child; // Repeat using new ilt
        continue; // do not advance over ilt->_child
      }
      assert( ilt->_tail == hot_tail, "expected to only find the hot inner loop here" );
      phase->set_loop(_head,ilt);
    }
    pilt = &ilt->_child; // Advance to next
    ilt = *pilt;
  }

  // The flattening above may have re-hung subtrees; restore parent links.
  if( _child ) fix_parent( _child, this );
}
4037
//------------------------------beautify_loops---------------------------------
// Split shared headers and insert loop landing pads.
// Insert a LoopNode to replace the RegionNode.
// Return TRUE if loop tree is structurally changed.
bool IdealLoopTree::beautify_loops( PhaseIdealLoop *phase ) {
  bool result = false;
  // Cache parts in locals for easy access
  PhaseIterGVN &igvn = phase->_igvn;

  igvn.hash_delete(_head); // Yank from hash before hacking edges

  // Check for multiple fall-in paths. Peel off a landing pad if need be.
  int fall_in_cnt = 0;
  for( uint i = 1; i < _head->req(); i++ )
    if( !phase->is_member( this, _head->in(i) ) )
      fall_in_cnt++;
  assert( fall_in_cnt, "at least 1 fall-in path" );
  if( fall_in_cnt > 1 ) // Need a loop landing pad to merge fall-ins
    split_fall_in( phase, fall_in_cnt );

  // Swap inputs to the _head and all Phis to move the fall-in edge to
  // the left.
  fall_in_cnt = 1;
  while( phase->is_member( this, _head->in(fall_in_cnt) ) )
    fall_in_cnt++;
  if( fall_in_cnt > 1 ) {
    // Since I am just swapping inputs I do not need to update def-use info
    Node *tmp = _head->in(1);
    igvn.rehash_node_delayed(_head);
    _head->set_req( 1, _head->in(fall_in_cnt) );
    _head->set_req( fall_in_cnt, tmp );
    // Swap also all Phis: their inputs must stay parallel to the Region's.
    for (DUIterator_Fast imax, i = _head->fast_outs(imax); i < imax; i++) {
      Node* phi = _head->fast_out(i);
      if( phi->is_Phi() ) {
        igvn.rehash_node_delayed(phi); // Yank from hash before hacking edges
        tmp = phi->in(1);
        phi->set_req( 1, phi->in(fall_in_cnt) );
        phi->set_req( fall_in_cnt, tmp );
      }
    }
  }
  assert( !phase->is_member( this, _head->in(1) ), "left edge is fall-in" );
  assert( phase->is_member( this, _head->in(2) ), "right edge is loop" );

  // If I am a shared header (multiple backedges), peel off the many
  // backedges into a private merge point and use the merge point as
  // the one true backedge.
  if (_head->req() > 3) {
    // Merge the many backedges into a single backedge but leave
    // the hottest backedge as separate edge for the following peel.
    if (!_irreducible) {
      merge_many_backedges( phase );
    }

    // When recursively beautify my children, split_fall_in can change
    // loop tree structure when I am an irreducible loop. Then the head
    // of my children has a req() not bigger than 3. Here we need to set
    // result to true to catch that case in order to tell the caller to
    // rebuild loop tree. See issue JDK-8244407 for details.
    result = true;
  }

  // If I have one hot backedge, peel off myself loop.
  // I better be the outermost loop.
  if (_head->req() > 3 && !_irreducible) {
    split_outer_loop( phase );
    result = true;

  } else if (!_head->is_Loop() && !_irreducible) {
    // The header is a plain Region (one fall-in, one backedge).
    // Make a new LoopNode to replace the old loop head.
    Node *l = new LoopNode( _head->in(1), _head->in(2) );
    l = igvn.register_new_node_with_optimizer(l, _head);
    phase->set_created_loop_node();
    // Go ahead and replace _head
    phase->_igvn.replace_node( _head, l );
    _head = l;
    phase->set_loop(_head, this);
  }

  // Now recursively beautify nested loops
  if( _child ) result |= _child->beautify_loops( phase );
  if( _next ) result |= _next ->beautify_loops( phase );
  return result;
}
4123
//------------------------------allpaths_check_safepts----------------------------
// Allpaths backwards scan. Starting at the head, traversing all backedges, and the body. Terminating each path at first
// safepoint encountered. Helper for check_safepts. Safepoints found that
// belong to a deeper (nested) loop are recorded in _required_safept so the
// inner loop will not delete them out from under this loop.
void IdealLoopTree::allpaths_check_safepts(VectorSet &visited, Node_List &stack) {
  assert(stack.size() == 0, "empty stack");
  stack.push(_head);
  visited.clear();
  visited.set(_head->_idx);
  while (stack.size() > 0) {
    Node* n = stack.pop();
    if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) {
      // Terminate this path
    } else if (n->Opcode() == Op_SafePoint) {
      if (_phase->get_loop(n) != this) {
        // Safepoint lives in a nested loop; this loop relies on it, so
        // protect it from deletion by that inner loop.
        if (_required_safept == nullptr) _required_safept = new Node_List();
        // save the first we run into on that path: closest to the tail if the head has a single backedge
        _required_safept->push(n);
      }
      // Terminate this path
    } else {
      // Walk backwards. For a Region follow all control inputs — except for
      // an inner Loop head (is_Loop() but not _head), where only the entry
      // edge in(1) is followed so we do not chase the inner backedge.
      uint start = n->is_Region() ? 1 : 0;
      uint end = n->is_Region() && (!n->is_Loop() || n == _head) ? n->req() : start + 1;
      for (uint i = start; i < end; i++) {
        Node* in = n->in(i);
        assert(in->is_CFG(), "must be");
        if (!visited.test_set(in->_idx) && is_member(_phase->get_loop(in))) {
          stack.push(in);
        }
      }
    }
  }
}
4156
4157 //------------------------------check_safepts----------------------------
4158 // Given dominators, try to find loops with calls that must always be
4159 // executed (call dominates loop tail). These loops do not need non-call
4160 // safepoints (ncsfpt).
4161 //
// A complication is that a safepoint in an inner loop may be needed
4163 // by an outer loop. In the following, the inner loop sees it has a
4164 // call (block 3) on every path from the head (block 2) to the
4165 // backedge (arc 3->2). So it deletes the ncsfpt (non-call safepoint)
4166 // in block 2, _but_ this leaves the outer loop without a safepoint.
4167 //
4168 // entry 0
4169 // |
4170 // v
4171 // outer 1,2 +->1
4172 // | |
4173 // | v
4174 // | 2<---+ ncsfpt in 2
4175 // |_/|\ |
4176 // | v |
4177 // inner 2,3 / 3 | call in 3
4178 // / | |
4179 // v +--+
4180 // exit 4
4181 //
4182 //
// For each loop, this method creates a list (_required_safept) of ncsfpt
// nodes that must be protected. When a ncsfpt may be deleted, it is first
// looked for in the lists for the outer loops of the current loop.
4186 //
4187 // The insights into the problem:
4188 // A) counted loops are okay
4189 // B) innermost loops are okay (only an inner loop can delete
4190 // a ncsfpt needed by an outer loop)
4191 // C) a loop is immune from an inner loop deleting a safepoint
4192 // if the loop has a call on the idom-path
4193 // D) a loop is also immune if it has a ncsfpt (non-call safepoint) on the
4194 // idom-path that is not in a nested loop
4195 // E) otherwise, an ncsfpt on the idom-path that is nested in an inner
4196 // loop needs to be prevented from deletion by an inner loop
4197 //
4198 // There are two analyses:
4199 // 1) The first, and cheaper one, scans the loop body from
4200 // tail to head following the idom (immediate dominator)
4201 // chain, looking for the cases (C,D,E) above.
4202 // Since inner loops are scanned before outer loops, there is summary
4203 // information about inner loops. Inner loops can be skipped over
4204 // when the tail of an inner loop is encountered.
4205 //
4206 // 2) The second, invoked if the first fails to find a call or ncsfpt on
4207 // the idom path (which is rare), scans all predecessor control paths
4208 // from the tail to the head, terminating a path when a call or sfpt
4209 // is encountered, to find the ncsfpt's that are closest to the tail.
4210 //
4211 void IdealLoopTree::check_safepts(VectorSet &visited, Node_List &stack) {
4212 // Bottom up traversal
4213 IdealLoopTree* ch = _child;
4214 if (_child) _child->check_safepts(visited, stack);
4215 if (_next) _next ->check_safepts(visited, stack);
4216
4217 if (!_head->is_CountedLoop() && !_has_sfpt && _parent != nullptr) {
4218 bool has_call = false; // call on dom-path
4219 bool has_local_ncsfpt = false; // ncsfpt on dom-path at this loop depth
4220 Node* nonlocal_ncsfpt = nullptr; // ncsfpt on dom-path at a deeper depth
4221 if (!_irreducible) {
4222 // Scan the dom-path nodes from tail to head
4223 for (Node* n = tail(); n != _head; n = _phase->idom(n)) {
4224 if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) {
4225 has_call = true;
4226 _has_sfpt = 1; // Then no need for a safept!
4227 break;
4228 } else if (n->Opcode() == Op_SafePoint) {
4229 if (_phase->get_loop(n) == this) {
4230 has_local_ncsfpt = true;
4231 break;
4232 }
4233 if (nonlocal_ncsfpt == nullptr) {
4234 nonlocal_ncsfpt = n; // save the one closest to the tail
4235 }
4236 } else {
4237 IdealLoopTree* nlpt = _phase->get_loop(n);
4238 if (this != nlpt) {
4239 // If at an inner loop tail, see if the inner loop has already
4240 // recorded seeing a call on the dom-path (and stop.) If not,
4241 // jump to the head of the inner loop.
4242 assert(is_member(nlpt), "nested loop");
4243 Node* tail = nlpt->_tail;
4244 if (tail->in(0)->is_If()) tail = tail->in(0);
4245 if (n == tail) {
4246 // If inner loop has call on dom-path, so does outer loop
4247 if (nlpt->_has_sfpt) {
4248 has_call = true;
4249 _has_sfpt = 1;
4250 break;
4251 }
4252 // Skip to head of inner loop
4253 assert(_phase->is_dominator(_head, nlpt->_head), "inner head dominated by outer head");
4254 n = nlpt->_head;
4255 if (_head == n) {
4256 // this and nlpt (inner loop) have the same loop head. This should not happen because
4257 // during beautify_loops we call merge_many_backedges. However, infinite loops may not
4258 // have been attached to the loop-tree during build_loop_tree before beautify_loops,
4259 // but then attached in the build_loop_tree afterwards, and so still have unmerged
4260 // backedges. Check if we are indeed in an infinite subgraph, and terminate the scan,
4261 // since we have reached the loop head of this.
4262 assert(_head->as_Region()->is_in_infinite_subgraph(),
4263 "only expect unmerged backedges in infinite loops");
4264 break;
4265 }
4266 }
4267 }
4268 }
4269 }
4270 }
4271 // Record safept's that this loop needs preserved when an
4272 // inner loop attempts to delete it's safepoints.
4273 if (_child != nullptr && !has_call && !has_local_ncsfpt) {
4274 if (nonlocal_ncsfpt != nullptr) {
4275 if (_required_safept == nullptr) _required_safept = new Node_List();
4276 _required_safept->push(nonlocal_ncsfpt);
4277 } else {
4278 // Failed to find a suitable safept on the dom-path. Now use
4279 // an all paths walk from tail to head, looking for safepoints to preserve.
4280 allpaths_check_safepts(visited, stack);
4281 }
4282 }
4283 }
4284 }
4285
4286 //---------------------------is_deleteable_safept----------------------------
4287 // Is safept not required by an outer loop?
4288 bool PhaseIdealLoop::is_deleteable_safept(Node* sfpt) {
4289 assert(sfpt->Opcode() == Op_SafePoint, "");
4290 IdealLoopTree* lp = get_loop(sfpt)->_parent;
4291 while (lp != nullptr) {
4292 Node_List* sfpts = lp->_required_safept;
4293 if (sfpts != nullptr) {
4294 for (uint i = 0; i < sfpts->size(); i++) {
4295 if (sfpt == sfpts->at(i))
4296 return false;
4297 }
4298 }
4299 lp = lp->_parent;
4300 }
4301 return true;
4302 }
4303
4304 //---------------------------replace_parallel_iv-------------------------------
4305 // Replace parallel induction variable (parallel to trip counter)
4306 // This optimization looks for patterns similar to:
4307 //
4308 // int a = init2;
4309 // for (int iv = init; iv < limit; iv += stride_con) {
4310 // a += stride_con2;
4311 // }
4312 //
4313 // and transforms it to:
4314 //
4315 // int iv2 = init2
4316 // int iv = init
4317 // loop:
4318 // if (iv >= limit) goto exit
4319 // iv += stride_con
4320 // iv2 = init2 + (iv - init) * (stride_con2 / stride_con)
4321 // goto loop
4322 // exit:
4323 // ...
4324 //
4325 // Such transformation introduces more optimization opportunities. In this
4326 // particular example, the loop can be eliminated entirely given that
4327 // `stride_con2 / stride_con` is exact (i.e., no remainder). Checks are in
4328 // place to only perform this optimization if such a division is exact. This
4329 // example will be transformed into its semantic equivalence:
4330 //
4331 // int iv2 = (iv * stride_con2 / stride_con) + (init2 - (init * stride_con2 / stride_con))
4332 //
4333 // which corresponds to the structure of transformed subgraph.
4334 //
4335 // However, if there is a mismatch between types of the loop and the parallel
4336 // induction variable (e.g., a long-typed IV in an int-typed loop), type
4337 // conversions are required:
4338 //
4339 // long iv2 = ((long) iv * stride_con2 / stride_con) + (init2 - ((long) init * stride_con2 / stride_con))
4340 //
// See the block comment above for the transformation performed here:
// each secondary induction variable phi2 (with incr2 = phi2 + stride_con2,
// stride_con2 an exact multiple of the trip counter's stride) is replaced
// by the affine expression phi * ratio + (init2 - init * ratio).
void PhaseIdealLoop::replace_parallel_iv(IdealLoopTree *loop) {
  assert(loop->_head->is_CountedLoop(), "");
  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  if (!cl->is_valid_counted_loop(T_INT)) {
    return; // skip malformed counted loop
  }
  Node *incr = cl->incr();
  if (incr == nullptr) {
    return; // Dead loop?
  }
  Node *init = cl->init_trip();
  Node *phi = cl->phi();
  jlong stride_con = cl->stride_con();

  // Visit all children, looking for Phis
  for (DUIterator i = cl->outs(); cl->has_out(i); i++) {
    Node *out = cl->out(i);
    // Look for other phis (secondary IVs). Skip dead ones
    if (!out->is_Phi() || out == phi || !has_node(out)) {
      continue;
    }

    PhiNode* phi2 = out->as_Phi();
    Node* incr2 = phi2->in(LoopNode::LoopBackControl);
    // Look for induction variables of the form: X += constant
    if (phi2->region() != loop->_head ||
        incr2->req() != 3 ||
        incr2->in(1)->uncast() != phi2 ||
        incr2 == incr ||
        (incr2->Opcode() != Op_AddI && incr2->Opcode() != Op_AddL) ||
        !incr2->in(2)->is_Con()) {
      continue;
    }

    if (incr2->in(1)->is_ConstraintCast() &&
        !(incr2->in(1)->in(0)->is_IfProj() && incr2->in(1)->in(0)->in(0)->is_RangeCheck())) {
      // Skip AddI->CastII->Phi case if CastII is not controlled by local RangeCheck
      continue;
    }
    // Check for parallel induction variable (parallel to trip counter)
    // via an affine function. In particular, count-down loops with
    // count-up array indices are common. We only RCE references off
    // the trip-counter, so we need to convert all these to trip-counter
    // expressions.
    Node* init2 = phi2->in(LoopNode::EntryControl);

    // Determine the basic type of the stride constant (and the iv being incremented).
    BasicType stride_con2_bt = incr2->Opcode() == Op_AddI ? T_INT : T_LONG;
    jlong stride_con2 = incr2->in(2)->get_integer_as_long(stride_con2_bt);

    // The ratio of the two strides cannot be represented as an int
    // if stride_con2 is min_jint (or min_jlong, respectively) and
    // stride_con is -1.
    if (stride_con2 == min_signed_integer(stride_con2_bt) && stride_con == -1) {
      continue;
    }

    // The general case here gets a little tricky. We want to find the
    // GCD of all possible parallel IV's and make a new IV using this
    // GCD for the loop. Then all possible IVs are simple multiples of
    // the GCD. In practice, this will cover very few extra loops.
    // Instead we require 'stride_con2' to be a multiple of 'stride_con',
    // where +/-1 is the common case, but other integer multiples are
    // also easy to handle.
    jlong ratio_con = stride_con2 / stride_con;

    if ((ratio_con * stride_con) != stride_con2) { // Check for exact (no remainder)
      continue;
    }

#ifndef PRODUCT
    if (TraceLoopOpts) {
      tty->print("Parallel IV: %d ", phi2->_idx);
      loop->dump_head();
    }
#endif

    // Convert to using the trip counter. The parallel induction
    // variable differs from the trip counter by a loop-invariant
    // amount, the difference between their respective initial values.
    // It is scaled by the 'ratio_con'.
    Node* ratio = integercon(ratio_con, stride_con2_bt);

    // Widen init/phi to the secondary IV's type if needed (int loop, long IV).
    Node* init_converted = insert_convert_node_if_needed(stride_con2_bt, init);
    Node* phi_converted = insert_convert_node_if_needed(stride_con2_bt, phi);

    // ratio_init = init * ratio (loop-invariant)
    Node* ratio_init = MulNode::make(init_converted, ratio, stride_con2_bt);
    _igvn.register_new_node_with_optimizer(ratio_init, init_converted);
    set_early_ctrl(ratio_init, false);

    // diff = init2 - init * ratio (loop-invariant offset)
    Node* diff = SubNode::make(init2, ratio_init, stride_con2_bt);
    _igvn.register_new_node_with_optimizer(diff, init2);
    set_early_ctrl(diff, false);

    // ratio_idx = phi * ratio (varies per iteration, so pinned in the loop)
    Node* ratio_idx = MulNode::make(phi_converted, ratio, stride_con2_bt);
    _igvn.register_new_node_with_optimizer(ratio_idx, phi_converted);
    set_ctrl(ratio_idx, cl);

    // add = phi * ratio + (init2 - init * ratio): the secondary IV's value
    Node* add = AddNode::make(ratio_idx, diff, stride_con2_bt);
    _igvn.register_new_node_with_optimizer(add);
    set_ctrl(add, cl);

    _igvn.replace_node( phi2, add );
    // Sometimes an induction variable is unused
    if (add->outcnt() == 0) {
      _igvn.remove_dead_node(add);
    }
    --i; // deleted this phi; rescan starting with next position
  }
}
4451
4452 Node* PhaseIdealLoop::insert_convert_node_if_needed(BasicType target, Node* input) {
4453 BasicType source = _igvn.type(input)->basic_type();
4454 if (source == target) {
4455 return input;
4456 }
4457
4458 Node* converted = ConvertNode::create_convert(source, target, input);
4459 _igvn.register_new_node_with_optimizer(converted, input);
4460 set_early_ctrl(converted, false);
4461
4462 return converted;
4463 }
4464
// Remove deletable safepoints from this loop's body. If 'keep_one' is
// requested, the safepoint closest to the tail on the idom-path (if any)
// is preserved; if no safepoint lies on the idom-path, nothing is removed,
// since then no single safepoint dominates every path through the loop.
void IdealLoopTree::remove_safepoints(PhaseIdealLoop* phase, bool keep_one) {
  Node* keep = nullptr;
  if (keep_one) {
    // Look for a safepoint on the idom-path.
    for (Node* i = tail(); i != _head; i = phase->idom(i)) {
      if (i->Opcode() == Op_SafePoint && phase->get_loop(i) == this) {
        keep = i;
        break; // Found one
      }
    }
  }

  // Don't remove any safepoints if it is requested to keep a single safepoint and
  // no safepoint was found on idom-path. It is not safe to remove any safepoint
  // in this case since there's no safepoint dominating all paths in the loop body.
  bool prune = !keep_one || keep != nullptr;

  // Delete other safepoints in this loop.
  Node_List* sfpts = _safepts;
  if (prune && sfpts != nullptr) {
    assert(keep == nullptr || keep->Opcode() == Op_SafePoint, "not safepoint");
    for (uint i = 0; i < sfpts->size(); i++) {
      Node* n = sfpts->at(i);
      assert(phase->get_loop(n) == this, "");
      // Only delete safepoints not required by an enclosing loop.
      if (n != keep && phase->is_deleteable_safept(n)) {
        phase->lazy_replace(n, n->in(TypeFunc::Control));
      }
    }
  }
}
4495
//------------------------------counted_loop-----------------------------------
// Convert to counted loops where possible. Also removes now-redundant
// safepoints and rewrites parallel induction variables in terms of the
// trip counter, then recurses over the rest of the loop tree.
void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) {

  // For grins, set the inner-loop flag here
  if (!_child) {
    if (_head->is_Loop()) _head->as_Loop()->set_inner_loop();
  }

  // 'loop' may be updated by is_counted_loop (e.g. to a newly inserted
  // enclosing strip-mined loop tree -- see the asserts below).
  IdealLoopTree* loop = this;
  if (_head->is_CountedLoop() ||
      phase->is_counted_loop(_head, loop, T_INT)) {

    if (LoopStripMiningIter == 0 || _head->as_CountedLoop()->is_strip_mined()) {
      // Indicate we do not need a safepoint here
      _has_sfpt = 1;
    }

    // Remove safepoints
    bool keep_one_sfpt = !(_has_call || _has_sfpt);
    remove_safepoints(phase, keep_one_sfpt);

    // Look for induction variables
    phase->replace_parallel_iv(this);
  } else if (_head->is_LongCountedLoop() ||
             phase->is_counted_loop(_head, loop, T_LONG)) {
    remove_safepoints(phase, true);
  } else {
    assert(!_head->is_Loop() || !_head->as_Loop()->is_loop_nest_inner_loop(), "transformation to counted loop should not fail");
    if (_parent != nullptr && !_irreducible) {
      // Not a counted loop. Keep one safepoint.
      bool keep_one_sfpt = true;
      remove_safepoints(phase, keep_one_sfpt);
    }
  }

  // Recursively
  assert(loop->_child != this || (loop->_head->as_Loop()->is_OuterStripMinedLoop() && _head->as_CountedLoop()->is_strip_mined()), "what kind of loop was added?");
  assert(loop->_child != this || (loop->_child->_child == nullptr && loop->_child->_next == nullptr), "would miss some loops");
  if (loop->_child && loop->_child != this) loop->_child->counted_loop(phase);
  if (loop->_next) loop->_next ->counted_loop(phase);
}
4538
4539
4540 // The Estimated Loop Clone Size:
4541 // CloneFactor * (~112% * BodySize + BC) + CC + FanOutTerm,
4542 // where BC and CC are totally ad-hoc/magic "body" and "clone" constants,
4543 // respectively, used to ensure that the node usage estimates made are on the
4544 // safe side, for the most part. The FanOutTerm is an attempt to estimate the
4545 // possible additional/excessive nodes generated due to data and control flow
4546 // merging, for edges reaching outside the loop.
4547 uint IdealLoopTree::est_loop_clone_sz(uint factor) const {
4548
4549 precond(0 < factor && factor < 16);
4550
4551 uint const bc = 13;
4552 uint const cc = 17;
4553 uint const sz = _body.size() + (_body.size() + 7) / 2;
4554 uint estimate = factor * (sz + bc) + cc;
4555
4556 assert((estimate - cc) / factor == sz + bc, "overflow");
4557
4558 return estimate + est_loop_flow_merge_sz();
4559 }
4560
4561 // The Estimated Loop (full-) Unroll Size:
4562 // UnrollFactor * (~106% * BodySize) + CC + FanOutTerm,
4563 // where CC is a (totally) ad-hoc/magic "clone" constant, used to ensure that
4564 // node usage estimates made are on the safe side, for the most part. This is
4565 // a "light" version of the loop clone size calculation (above), based on the
4566 // assumption that most of the loop-construct overhead will be unraveled when
4567 // (fully) unrolled. Defined for unroll factors larger or equal to one (>=1),
4568 // including an overflow check and returning UINT_MAX in case of an overflow.
4569 uint IdealLoopTree::est_loop_unroll_sz(uint factor) const {
4570
4571 precond(factor > 0);
4572
4573 // Take into account that after unroll conjoined heads and tails will fold.
4574 uint const b0 = _body.size() - EMPTY_LOOP_SIZE;
4575 uint const cc = 7;
4576 uint const sz = b0 + (b0 + 15) / 16;
4577 uint estimate = factor * sz + cc;
4578
4579 if ((estimate - cc) / factor != sz) {
4580 return UINT_MAX;
4581 }
4582
4583 return estimate + est_loop_flow_merge_sz();
4584 }
4585
4586 // Estimate the growth effect (in nodes) of merging control and data flow when
4587 // cloning a loop body, based on the amount of control and data flow reaching
4588 // outside of the (current) loop body.
4589 uint IdealLoopTree::est_loop_flow_merge_sz() const {
4590
4591 uint ctrl_edge_out_cnt = 0;
4592 uint data_edge_out_cnt = 0;
4593
4594 for (uint i = 0; i < _body.size(); i++) {
4595 Node* node = _body.at(i);
4596 uint outcnt = node->outcnt();
4597
4598 for (uint k = 0; k < outcnt; k++) {
4599 Node* out = node->raw_out(k);
4600 if (out == nullptr) continue;
4601 if (out->is_CFG()) {
4602 if (!is_member(_phase->get_loop(out))) {
4603 ctrl_edge_out_cnt++;
4604 }
4605 } else if (_phase->has_ctrl(out)) {
4606 Node* ctrl = _phase->get_ctrl(out);
4607 assert(ctrl != nullptr, "must be");
4608 assert(ctrl->is_CFG(), "must be");
4609 if (!is_member(_phase->get_loop(ctrl))) {
4610 data_edge_out_cnt++;
4611 }
4612 }
4613 }
4614 }
4615 // Use data and control count (x2.0) in estimate iff both are > 0. This is
4616 // a rather pessimistic estimate for the most part, in particular for some
4617 // complex loops, but still not enough to capture all loops.
4618 if (ctrl_edge_out_cnt > 0 && data_edge_out_cnt > 0) {
4619 return 2 * (ctrl_edge_out_cnt + data_edge_out_cnt);
4620 }
4621 return 0;
4622 }
4623
4624 #ifndef PRODUCT
4625 //------------------------------dump_head--------------------------------------
4626 // Dump 1 liner for loop header info
// Dump a one-line summary of this loop: head/tail node indices, the
// predicate blocks present at the loop entry, counted-loop trip info,
// and assorted flags. Indented by nesting depth.
void IdealLoopTree::dump_head() {
  tty->sp(2 * _nest);
  tty->print("Loop: N%d/N%d ", _head->_idx, _tail->_idx);
  if (_irreducible) tty->print(" IRREDUCIBLE");
  // Predicates hang off the loop entry; for a strip-mined loop, look above
  // the outer strip-mined head.
  Node* entry = _head->is_Loop() ? _head->as_Loop()->skip_strip_mined(-1)->in(LoopNode::EntryControl)
                                 : _head->in(LoopNode::EntryControl);
  const Predicates predicates(entry);
  if (predicates.loop_limit_check_predicate_block()->is_non_empty()) {
    tty->print(" limit_check");
  }
  if (predicates.short_running_long_loop_predicate_block()->is_non_empty()) {
    tty->print(" short_running");
  }
  if (UseLoopPredicate) {
    if (UseProfiledLoopPredicate && predicates.profiled_loop_predicate_block()->is_non_empty()) {
      tty->print(" profile_predicated");
    }
    if (predicates.loop_predicate_block()->is_non_empty()) {
      tty->print(" predicated");
    }
  }
  if (UseAutoVectorizationPredicate && predicates.auto_vectorization_check_block()->is_non_empty()) {
    tty->print(" auto_vectorization_check_predicate");
  }
  if (_head->is_CountedLoop()) {
    CountedLoopNode *cl = _head->as_CountedLoop();
    tty->print(" counted");

    // Print trip range as [init,limit),stride; constants are printed
    // literally, unknown values print as "int".
    Node* init_n = cl->init_trip();
    if (init_n != nullptr && init_n->is_Con())
      tty->print(" [%d,", cl->init_trip()->get_int());
    else
      tty->print(" [int,");
    Node* limit_n = cl->limit();
    if (limit_n != nullptr && limit_n->is_Con())
      tty->print("%d),", cl->limit()->get_int());
    else
      tty->print("int),");
    int stride_con = cl->stride_con();
    if (stride_con > 0) tty->print("+");
    tty->print("%d", stride_con);

    tty->print(" (%0.f iters) ", cl->profile_trip_cnt());

    if (cl->is_pre_loop ()) tty->print(" pre" );
    if (cl->is_main_loop()) tty->print(" main");
    if (cl->is_post_loop()) tty->print(" post");
    if (cl->is_vectorized_loop()) tty->print(" vector");
    if (range_checks_present()) tty->print(" rc ");
    if (cl->is_multiversion_fast_loop()) { tty->print(" multiversion_fast"); }
    if (cl->is_multiversion_slow_loop()) { tty->print(" multiversion_slow"); }
    if (cl->is_multiversion_delayed_slow_loop()) { tty->print(" multiversion_delayed_slow"); }
  }
  if (_has_call) tty->print(" has_call");
  if (_has_sfpt) tty->print(" has_sfpt");
  if (_rce_candidate) tty->print(" rce");
  if (_safepts != nullptr && _safepts->size() > 0) {
    tty->print(" sfpts={"); _safepts->dump_simple(); tty->print(" }");
  }
  if (_required_safept != nullptr && _required_safept->size() > 0) {
    tty->print(" req={"); _required_safept->dump_simple(); tty->print(" }");
  }
  if (Verbose) {
    tty->print(" body={"); _body.dump_simple(); tty->print(" }");
  }
  if (_head->is_Loop() && _head->as_Loop()->is_strip_mined()) {
    tty->print(" strip_mined");
  }
  tty->cr();
}
4697
4698 //------------------------------dump-------------------------------------------
4699 // Dump loops by loop tree
4700 void IdealLoopTree::dump() {
4701 dump_head();
4702 if (_child) _child->dump();
4703 if (_next) _next ->dump();
4704 }
4705
4706 #endif
4707
4708 static void log_loop_tree_helper(IdealLoopTree* root, IdealLoopTree* loop, CompileLog* log) {
4709 if (loop == root) {
4710 if (loop->_child != nullptr) {
4711 log->begin_head("loop_tree");
4712 log->end_head();
4713 log_loop_tree_helper(root, loop->_child, log);
4714 log->tail("loop_tree");
4715 assert(loop->_next == nullptr, "what?");
4716 }
4717 } else if (loop != nullptr) {
4718 Node* head = loop->_head;
4719 log->begin_head("loop idx='%d'", head->_idx);
4720 if (loop->_irreducible) log->print(" irreducible='1'");
4721 if (head->is_Loop()) {
4722 if (head->as_Loop()->is_inner_loop()) log->print(" inner_loop='1'");
4723 if (head->as_Loop()->is_partial_peel_loop()) log->print(" partial_peel_loop='1'");
4724 } else if (head->is_CountedLoop()) {
4725 CountedLoopNode* cl = head->as_CountedLoop();
4726 if (cl->is_pre_loop()) log->print(" pre_loop='%d'", cl->main_idx());
4727 if (cl->is_main_loop()) log->print(" main_loop='%d'", cl->_idx);
4728 if (cl->is_post_loop()) log->print(" post_loop='%d'", cl->main_idx());
4729 }
4730 log->end_head();
4731 log_loop_tree_helper(root, loop->_child, log);
4732 log->tail("loop");
4733 log_loop_tree_helper(root, loop->_next, log);
4734 }
4735 }
4736
4737 void PhaseIdealLoop::log_loop_tree() {
4738 if (C->log() != nullptr) {
4739 log_loop_tree_helper(_ltree_root, _ltree_root, C->log());
4740 }
4741 }
4742
4743 // Eliminate all Parse and Template Assertion Predicates that are not associated with a loop anymore. The eliminated
4744 // predicates will be removed during the next round of IGVN.
4745 void PhaseIdealLoop::eliminate_useless_predicates() const {
4746 if (C->parse_predicate_count() == 0 && C->template_assertion_predicate_count() == 0) {
4747 return; // No predicates left.
4748 }
4749
4750 EliminateUselessPredicates eliminate_useless_predicates(_igvn, _ltree_root);
4751 eliminate_useless_predicates.eliminate();
4752 }
4753
// If a post or main loop is removed due to an assert predicate, the opaque that guards the loop is not needed anymore
void PhaseIdealLoop::eliminate_useless_zero_trip_guard() {
  if (_zero_trip_guard_opaque_nodes.size() == 0) {
    return;
  }
  // First pass: collect the opaque nodes that still guard a live counted loop,
  // i.e. are reachable from an innermost counted loop head via its canonical
  // loop entry.
  Unique_Node_List useful_zero_trip_guard_opaques_nodes;
  for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
    IdealLoopTree* lpt = iter.current();
    if (lpt->_child == nullptr && lpt->is_counted()) {
      CountedLoopNode* head = lpt->_head->as_CountedLoop();
      Node* opaque = head->is_canonical_loop_entry();
      if (opaque != nullptr) {
        useful_zero_trip_guard_opaques_nodes.push(opaque);
      }
    }
  }
  // Second pass: every recorded zero-trip-guard opaque that was not found above
  // guards a loop that no longer exists; strip the opaque (replace it by its
  // input) so the guard can constant fold during the next IGVN round.
  for (uint i = 0; i < _zero_trip_guard_opaque_nodes.size(); ++i) {
    OpaqueZeroTripGuardNode* opaque = ((OpaqueZeroTripGuardNode*)_zero_trip_guard_opaque_nodes.at(i));
    DEBUG_ONLY(CountedLoopNode* guarded_loop = opaque->guarded_loop());
    if (!useful_zero_trip_guard_opaques_nodes.member(opaque)) {
      IfNode* iff = opaque->if_node();
      IdealLoopTree* loop = get_loop(iff);
      // Walk up the loop tree to see whether the guard's loop is attached to
      // the tree root at all.
      while (loop != _ltree_root && loop != nullptr) {
        loop = loop->_parent;
      }
      if (loop == nullptr) {
        // unreachable from _ltree_root: zero trip guard is in a newly discovered infinite loop.
        // We can't tell if the opaque node is useful or not
        assert(guarded_loop == nullptr || guarded_loop->is_in_infinite_subgraph(), "");
      } else {
        assert(guarded_loop == nullptr, "");
        this->_igvn.replace_node(opaque, opaque->in(1));
      }
    } else {
      assert(guarded_loop != nullptr, "");
    }
  }
}
4792
// Mark OpaqueMultiversioning nodes useless when their multiversion_if can no
// longer be reached from any multiversioned fast main loop; such an if can
// then constant fold towards the fast loop during IGVN.
void PhaseIdealLoop::eliminate_useless_multiversion_if() {
  if (_multiversion_opaque_nodes.size() == 0) {
    return;
  }

  ResourceMark rm;
  Unique_Node_List useful_multiversioning_opaque_nodes;

  // The OpaqueMultiversioning is only used from the fast main loop in AutoVectorization, to add
  // speculative runtime-checks to the multiversion_if. Thus, a OpaqueMultiversioning is only
  // useful if it can be found from a fast main loop. If it can not be found from a fast main loop,
  // then we cannot ever use that multiversion_if to add more speculative runtime-checks, and hence
  // it is useless. If it is still in delayed mode, i.e. has not yet had any runtime-checks added,
  // then we can let it constant fold towards the fast loop.
  for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
    IdealLoopTree* lpt = iter.current();
    // Only innermost counted loops can be multiversioned fast main loops.
    if (lpt->_child == nullptr && lpt->is_counted()) {
      CountedLoopNode* head = lpt->_head->as_CountedLoop();
      if (head->is_main_loop() && head->is_multiversion_fast_loop()) {
        // There are fast_loop pre/main/post loops, but the finding traversal starts at the main
        // loop, and traverses via the fast pre loop to the multiversion_if.
        IfNode* multiversion_if = head->find_multiversion_if_from_multiversion_fast_main_loop();
        if (multiversion_if != nullptr) {
          useful_multiversioning_opaque_nodes.push(multiversion_if->in(1)->as_OpaqueMultiversioning());
        } else {
          // We could not find the multiversion_if, and would never find it again. Remove the
          // multiversion marking for consistency.
          head->set_no_multiversion();
        }
      }
    }
  }

  // Any recorded opaque not found above is useless; if it is still delayed we
  // mark it so IGVN can fold it, rather than hacking the node directly.
  for (uint i = 0; i < _multiversion_opaque_nodes.size(); i++) {
    OpaqueMultiversioningNode* opaque = _multiversion_opaque_nodes.at(i)->as_OpaqueMultiversioning();
    if (!useful_multiversioning_opaque_nodes.member(opaque)) {
      if (opaque->is_delayed_slow_loop()) {
        // We cannot hack the node directly, otherwise the slow_loop will complain that it cannot
        // find the multiversioning opaque node. Instead, we mark the opaque node as useless, and
        // it can be constant folded during IGVN.
        opaque->mark_useless(_igvn);
      }
    }
  }
}
4838
//------------------------process_expensive_nodes-----------------------------
// Expensive nodes have their control input set to prevent the GVN
// from commoning them and as a result forcing the resulting node to
// be in a more frequent path. Use CFG information here, to change the
// control inputs so that some expensive nodes can be commoned while
// not executed more frequently.
// Returns true if any control input was changed; the caller uses this to
// request another round of loop optimizations.
bool PhaseIdealLoop::process_expensive_nodes() {
  assert(OptimizeExpensiveOps, "optimization off?");

  // Sort nodes to bring similar nodes together
  C->sort_expensive_nodes();

  bool progress = false;

  for (int i = 0; i < C->expensive_count(); ) {
    Node* n = C->expensive_node(i);
    int start = i;
    // Find nodes similar to n
    i++;
    for (; i < C->expensive_count() && Compile::cmp_expensive_nodes(n, C->expensive_node(i)) == 0; i++);
    int end = i;
    // And compare them two by two
    for (int j = start; j < end; j++) {
      Node* n1 = C->expensive_node(j);
      if (is_node_unreachable(n1)) {
        continue;
      }
      for (int k = j+1; k < end; k++) {
        Node* n2 = C->expensive_node(k);
        if (is_node_unreachable(n2)) {
          continue;
        }

        assert(n1 != n2, "should be pair of nodes");

        Node* c1 = n1->in(0);
        Node* c2 = n2->in(0);

        Node* parent_c1 = c1;
        Node* parent_c2 = c2;

        // The call to get_early_ctrl_for_expensive() moves the
        // expensive nodes up but stops at loops that are in a if
        // branch. See whether we can exit the loop and move above the
        // If.
        if (c1->is_Loop()) {
          parent_c1 = c1->in(1);
        }
        if (c2->is_Loop()) {
          parent_c2 = c2->in(1);
        }

        if (parent_c1 == parent_c2) {
          // Same control (modulo a loop head): enqueue both so IGVN can
          // common them without moving anything.
          _igvn._worklist.push(n1);
          _igvn._worklist.push(n2);
          continue;
        }

        // Look for identical expensive node up the dominator chain.
        if (is_dominator(c1, c2)) {
          c2 = c1;
        } else if (is_dominator(c2, c1)) {
          c1 = c2;
        } else if (parent_c1->is_Proj() && parent_c1->in(0)->is_If() &&
                   parent_c2->is_Proj() && parent_c1->in(0) == parent_c2->in(0)) {
          // Both branches have the same expensive node so move it up
          // before the if.
          c1 = c2 = idom(parent_c1->in(0));
        }
        // Do the actual moves
        if (n1->in(0) != c1) {
          _igvn.replace_input_of(n1, 0, c1);
          progress = true;
        }
        if (n2->in(0) != c2) {
          _igvn.replace_input_of(n2, 0, c2);
          progress = true;
        }
      }
    }
  }

  return progress;
}
4923
//=============================================================================
//----------------------------build_and_optimize-------------------------------
// Create a PhaseLoop. Build the ideal Loop tree. Map each Ideal Node to
// its corresponding LoopNode. If 'optimize' is true, do some loop cleanups.
void PhaseIdealLoop::build_and_optimize() {
  assert(!C->post_loop_opts_phase(), "no loop opts allowed");

  // Decode the requested mode into the individual optimization switches.
  bool do_split_ifs = (_mode == LoopOptsDefault);
  bool skip_loop_opts = (_mode == LoopOptsNone);
  bool do_max_unroll = (_mode == LoopOptsMaxUnroll);


  int old_progress = C->major_progress();
  uint orig_worklist_size = _igvn._worklist.size();

  // Reset major-progress flag for the driver's heuristics
  C->clear_major_progress();

#ifndef PRODUCT
  // Capture for later assert
  uint unique = C->unique();
  _loop_invokes++;
  _loop_work += unique;
#endif

  // True if the method has at least 1 irreducible loop
  _has_irreducible_loops = false;

  _created_loop_node = false;

  VectorSet visited;
  // Pre-grow the mapping from Nodes to IdealLoopTrees.
  _loop_or_ctrl.map(C->unique(), nullptr);
  memset(_loop_or_ctrl.adr(), 0, wordSize * C->unique());

  // Pre-build the top-level outermost loop tree entry
  _ltree_root = new IdealLoopTree( this, C->root(), C->root() );
  // Do not need a safepoint at the top level
  _ltree_root->_has_sfpt = 1;

  // Initialize Dominators.
  // Checked in clone_loop_predicate() during beautify_loops().
  _idom_size = 0;
  _idom = nullptr;
  _dom_depth = nullptr;
  _dom_stk = nullptr;

  // Empty pre-order array
  allocate_preorders();

  // Build a loop tree on the fly. Build a mapping from CFG nodes to
  // IdealLoopTree entries. Data nodes are NOT walked.
  build_loop_tree();
  // Check for bailout, and return
  if (C->failing()) {
    return;
  }

  // Verify that the has_loops() flag set at parse time is consistent with the just built loop tree. When the back edge
  // is an exception edge, parsing doesn't set has_loops().
  assert(_ltree_root->_child == nullptr || C->has_loops() || C->has_exception_backedge(), "parsing found no loops but there are some");
  // No loops after all
  if( !_ltree_root->_child && !_verify_only ) C->set_has_loops(false);

  // There should always be an outer loop containing the Root and Return nodes.
  // If not, we have a degenerate empty program. Bail out in this case.
  if (!has_node(C->root())) {
    if (!_verify_only) {
      C->clear_major_progress();
      assert(false, "empty program detected during loop optimization");
      C->record_method_not_compilable("empty program detected during loop optimization");
    }
    return;
  }

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  // Nothing to do, so get out
  bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !do_max_unroll && !_verify_me &&
          !_verify_only && !bs->is_gc_specific_loop_opts_pass(_mode);
  bool do_expensive_nodes = C->should_optimize_expensive_nodes(_igvn);
  bool strip_mined_loops_expanded = bs->strip_mined_loops_expanded(_mode);
  if (stop_early && !do_expensive_nodes) {
    return;
  }

  // Set loop nesting depth
  _ltree_root->set_nest( 0 );

  // Split shared headers and insert loop landing pads.
  // Do not bother doing this on the Root loop of course.
  if( !_verify_me && !_verify_only && _ltree_root->_child ) {
    C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3);
    if( _ltree_root->_child->beautify_loops( this ) ) {
      // Re-build loop tree!
      _ltree_root->_child = nullptr;
      _loop_or_ctrl.clear();
      reallocate_preorders();
      build_loop_tree();
      // Check for bailout, and return
      if (C->failing()) {
        return;
      }
      // Reset loop nesting depth
      _ltree_root->set_nest( 0 );

      C->print_method(PHASE_AFTER_BEAUTIFY_LOOPS, 3);
    }
  }

  // Build Dominators for elision of null checks & loop finding.
  // Since nodes do not have a slot for immediate dominator, make
  // a persistent side array for that info indexed on node->_idx.
  _idom_size = C->unique();
  _idom      = NEW_RESOURCE_ARRAY( Node*, _idom_size );
  _dom_depth = NEW_RESOURCE_ARRAY( uint,  _idom_size );
  _dom_stk   = nullptr; // Allocated on demand in recompute_dom_depth
  memset( _dom_depth, 0, _idom_size * sizeof(uint) );

  Dominators();

  if (!_verify_only) {
    // As a side effect, Dominators removed any unreachable CFG paths
    // into RegionNodes. It doesn't do this test against Root, so
    // we do it here.
    for( uint i = 1; i < C->root()->req(); i++ ) {
      if (!_loop_or_ctrl[C->root()->in(i)->_idx]) { // Dead path into Root?
        _igvn.delete_input_of(C->root(), i);
        i--; // Rerun same iteration on compressed edges
      }
    }

    // Given dominators, try to find inner loops with calls that must
    // always be executed (call dominates loop tail). These loops do
    // not need a separate safepoint.
    Node_List cisstack;
    _ltree_root->check_safepts(visited, cisstack);
  }

  // Walk the DATA nodes and place into loops. Find earliest control
  // node. For CFG nodes, the _loop_or_ctrl array starts out and remains
  // holding the associated IdealLoopTree pointer. For DATA nodes, the
  // _loop_or_ctrl array holds the earliest legal controlling CFG node.

  // Allocate stack with enough space to avoid frequent realloc
  int stack_size = (C->live_nodes() >> 1) + 16; // (live_nodes>>1)+16 from Java2D stats
  Node_Stack nstack(stack_size);

  visited.clear();
  Node_List worklist;
  // Don't need C->root() on worklist since
  // it will be processed among C->top() inputs
  worklist.push(C->top());
  visited.set(C->top()->_idx); // Set C->top() as visited now
  build_loop_early( visited, worklist, nstack );

  // Given early legal placement, try finding counted loops. This placement
  // is good enough to discover most loop invariants.
  if (!_verify_me && !_verify_only && !strip_mined_loops_expanded) {
    _ltree_root->counted_loop( this );
  }

  // Find latest loop placement. Find ideal loop placement.
  visited.clear();
  init_dom_lca_tags();
  // Need C->root() on worklist when processing outs
  worklist.push(C->root());
  NOT_PRODUCT( C->verify_graph_edges(); )
  worklist.push(C->top());
  build_loop_late( visited, worklist, nstack );
  if (C->failing()) { return; }

  if (_verify_only) {
    // Verification-only runs must not change the graph or the worklist.
    C->restore_major_progress(old_progress);
    assert(C->unique() == unique, "verification _mode made Nodes? ? ?");
    assert(_igvn._worklist.size() == orig_worklist_size, "shouldn't push anything");
    return;
  }

  // clear out the dead code after build_loop_late
  while (_deadlist.size()) {
    _igvn.remove_globally_dead_node(_deadlist.pop());
  }

  // Drop opaque nodes whose guarded loops no longer exist.
  eliminate_useless_zero_trip_guard();
  eliminate_useless_multiversion_if();

  if (stop_early) {
    assert(do_expensive_nodes, "why are we here?");
    if (process_expensive_nodes()) {
      // If we made some progress when processing expensive nodes then
      // the IGVN may modify the graph in a way that will allow us to
      // make some more progress: we need to try processing expensive
      // nodes again.
      C->set_major_progress();
    }
    return;
  }

  // Some parser-inserted loop predicates could never be used by loop
  // predication or they were moved away from loop during some optimizations.
  // For example, peeling. Eliminate them before next loop optimizations.
  eliminate_useless_predicates();

#ifndef PRODUCT
  C->verify_graph_edges();
  if (_verify_me) {             // Nested verify pass?
    // Check to see if the verify _mode is broken
    assert(C->unique() == unique, "non-optimize _mode made Nodes? ? ?");
    return;
  }
  DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
  if (TraceLoopOpts && C->has_loops()) {
    _ltree_root->dump();
  }
#endif

  if (skip_loop_opts) {
    C->restore_major_progress(old_progress);
    return;
  }

  if (do_max_unroll) {
    // LoopOptsMaxUnroll mode: only maximally unroll innermost normal counted
    // loops (after trying the cheaper one-iteration/empty-loop removals).
    for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
      IdealLoopTree* lpt = iter.current();
      if (lpt->is_innermost() && lpt->_allow_optimizations && !lpt->_has_call && lpt->is_counted()) {
        lpt->compute_trip_count(this, T_INT);
        if (!lpt->do_one_iteration_loop(this) &&
            !lpt->do_remove_empty_loop(this)) {
          AutoNodeBudget node_budget(this);
          if (lpt->_head->as_CountedLoop()->is_normal_loop() &&
              lpt->policy_maximally_unroll(this)) {
            memset( worklist.adr(), 0, worklist.max()*sizeof(Node*) );
            do_maximally_unroll(lpt, worklist);
          }
        }
      }
    }

    C->restore_major_progress(old_progress);
    return;
  }

  // Let a GC-specific pass take over if requested (e.g. barrier expansion).
  if (bs->optimize_loops(this, _mode, visited, nstack, worklist)) {
    return;
  }

  if (ReassociateInvariants && !C->major_progress()) {
    // Reassociate invariants and prep for split_thru_phi
    for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
      IdealLoopTree* lpt = iter.current();
      if (!lpt->is_loop()) {
        continue;
      }
      Node* head = lpt->_head;
      if (!head->is_BaseCountedLoop() || !lpt->is_innermost()) continue;

      // check for vectorized loops, any reassociation of invariants was already done
      if (head->is_CountedLoop()) {
        if (head->as_CountedLoop()->is_unroll_only()) {
          continue;
        } else {
          AutoNodeBudget node_budget(this);
          lpt->reassociate_invariants(this);
        }
      }
      // Because RCE opportunities can be masked by split_thru_phi,
      // look for RCE candidates and inhibit split_thru_phi
      // on just their loop-phi's for this pass of loop opts
      if (SplitIfBlocks && do_split_ifs &&
          head->as_BaseCountedLoop()->is_valid_counted_loop(head->as_BaseCountedLoop()->bt()) &&
          (lpt->policy_range_check(this, true, T_LONG) ||
           (head->is_CountedLoop() && lpt->policy_range_check(this, true, T_INT)))) {
        lpt->_rce_candidate = 1; // = true
      }
    }
  }

  // Check for aggressive application of split-if and other transforms
  // that require basic-block info (like cloning through Phi's)
  if (!C->major_progress() && SplitIfBlocks && do_split_ifs) {
    visited.clear();
    split_if_with_blocks(visited, nstack);
    if (C->failing()) {
      return;
    }
    DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
  }

  if (!C->major_progress() && do_expensive_nodes && process_expensive_nodes()) {
    C->set_major_progress();
  }

  // Perform loop predication before iteration splitting
  if (UseLoopPredicate && C->has_loops() && !C->major_progress() && (C->parse_predicate_count() > 0)) {
    _ltree_root->_child->loop_predication(this);
  }

  if (OptimizeFill && UseLoopPredicate && C->has_loops() && !C->major_progress()) {
    if (do_intrinsify_fill()) {
      C->set_major_progress();
    }
  }

  // Perform iteration-splitting on inner loops. Split iterations to avoid
  // range checks or one-shot null checks.

  // If split-if's didn't hack the graph too bad (no CFG changes)
  // then do loop opts.
  if (C->has_loops() && !C->major_progress()) {
    memset( worklist.adr(), 0, worklist.max()*sizeof(Node*) );
    _ltree_root->_child->iteration_split( this, worklist );
    // No verify after peeling! GCM has hoisted code out of the loop.
    // After peeling, the hoisted code could sink inside the peeled area.
    // The peeling code does not try to recompute the best location for
    // all the code before the peeled area, so the verify pass will always
    // complain about it.
  }

  // Check for bailout, and return
  if (C->failing()) {
    return;
  }

  // Do verify graph edges in any case
  NOT_PRODUCT( C->verify_graph_edges(); );

  if (!do_split_ifs) {
    // We saw major progress in Split-If to get here. We forced a
    // pass with unrolling and not split-if, however more split-if's
    // might make progress. If the unrolling didn't make progress
    // then the major-progress flag got cleared and we won't try
    // another round of Split-If. In particular the ever-common
    // instance-of/check-cast pattern requires at least 2 rounds of
    // Split-If to clear out.
    C->set_major_progress();
  }

  // Repeat loop optimizations if new loops were seen
  if (created_loop_node()) {
    C->set_major_progress();
  }

  // Auto-vectorize main-loop
  if (C->do_superword() && C->has_loops() && !C->major_progress()) {
    Compile::TracePhase tp(_t_autoVectorize);

    // Shared data structures for all AutoVectorizations, to reduce allocations
    // of large arrays.
    VSharedData vshared;
    for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
      IdealLoopTree* lpt = iter.current();
      AutoVectorizeStatus status = auto_vectorize(lpt, vshared);

      if (status == AutoVectorizeStatus::TriedAndFailed) {
        // We tried vectorization, but failed. From now on only unroll the loop.
        CountedLoopNode* cl = lpt->_head->as_CountedLoop();
        if (cl->has_passed_slp()) {
          C->set_major_progress();
          cl->set_notpassed_slp();
          cl->mark_do_unroll_only();
        }
      }
    }
  }

  // Keep loop predicates and perform optimizations with them
  // until no more loop optimizations could be done.
  // After that switch predicates off and do more loop optimizations.
  if (!C->major_progress() && (C->parse_predicate_count() > 0)) {
    C->mark_parse_predicate_nodes_useless(_igvn);
    assert(C->parse_predicate_count() == 0, "should be zero now");
    if (TraceLoopOpts) {
      tty->print_cr("PredicatesOff");
    }
    C->set_major_progress();
  }
}
5301
#ifndef PRODUCT
//------------------------------print_statistics-------------------------------
// Statistics accumulated across all PhaseIdealLoop invocations in this VM;
// updated in build_and_optimize (invokes/work) and the long-loop code.
int PhaseIdealLoop::_loop_invokes=0;// Count of PhaseIdealLoop invokes
int PhaseIdealLoop::_loop_work=0; // Sum of PhaseIdealLoop x unique
volatile int PhaseIdealLoop::_long_loop_candidates=0; // Number of long loops seen
volatile int PhaseIdealLoop::_long_loop_nests=0; // Number of long loops successfully transformed to a nest
volatile int PhaseIdealLoop::_long_loop_counted_loops=0; // Number of long loops successfully transformed to a counted loop
// Print the accumulated statistics on the tty (e.g. at VM exit).
void PhaseIdealLoop::print_statistics() {
  tty->print_cr("PhaseIdealLoop=%d, sum _unique=%d, long loops=%d/%d/%d", _loop_invokes, _loop_work, _long_loop_counted_loops, _long_loop_nests, _long_loop_candidates);
}
#endif
5313
5314 #ifdef ASSERT
5315 // Build a verify-only PhaseIdealLoop, and see that it agrees with "this".
5316 void PhaseIdealLoop::verify() const {
5317 ResourceMark rm;
5318 int old_progress = C->major_progress();
5319 bool success = true;
5320
5321 PhaseIdealLoop phase_verify(_igvn, this);
5322 if (C->failing_internal()) {
5323 return;
5324 }
5325
5326 // Verify ctrl and idom of every node.
5327 success &= verify_idom_and_nodes(C->root(), &phase_verify);
5328
5329 // Verify loop-tree.
5330 success &= _ltree_root->verify_tree(phase_verify._ltree_root);
5331
5332 assert(success, "VerifyLoopOptimizations failed");
5333
5334 // Major progress was cleared by creating a verify version of PhaseIdealLoop.
5335 C->restore_major_progress(old_progress);
5336 }
5337
5338 // Perform a BFS starting at n, through all inputs.
5339 // Call verify_idom and verify_node on all nodes of BFS traversal.
5340 bool PhaseIdealLoop::verify_idom_and_nodes(Node* root, const PhaseIdealLoop* phase_verify) const {
5341 Unique_Node_List worklist;
5342 worklist.push(root);
5343 bool success = true;
5344 for (uint i = 0; i < worklist.size(); i++) {
5345 Node* n = worklist.at(i);
5346 // process node
5347 success &= verify_idom(n, phase_verify);
5348 success &= verify_loop_ctrl(n, phase_verify);
5349 // visit inputs
5350 for (uint j = 0; j < n->req(); j++) {
5351 if (n->in(j) != nullptr) {
5352 worklist.push(n->in(j));
5353 }
5354 }
5355 }
5356 return success;
5357 }
5358
5359 // Verify dominator structure (IDOM).
5360 bool PhaseIdealLoop::verify_idom(Node* n, const PhaseIdealLoop* phase_verify) const {
5361 // Verify IDOM for all CFG nodes (except root).
5362 if (!n->is_CFG() || n->is_Root()) {
5363 return true; // pass
5364 }
5365
5366 if (n->_idx >= _idom_size) {
5367 tty->print("CFG Node with no idom: ");
5368 n->dump();
5369 return false; // fail
5370 }
5371
5372 Node* id = idom_no_update(n);
5373 Node* id_verify = phase_verify->idom_no_update(n);
5374 if (id != id_verify) {
5375 tty->print("Mismatching idom for node: ");
5376 n->dump();
5377 tty->print(" We have idom: ");
5378 id->dump();
5379 tty->print(" Verify has idom: ");
5380 id_verify->dump();
5381 tty->cr();
5382 return false; // fail
5383 }
5384 return true; // pass
5385 }
5386
// Verify "_loop_or_ctrl": control and loop membership.
// (0) _loop_or_ctrl[i] == nullptr -> node not reachable.
// (1) has_ctrl -> check lowest bit. 1 -> data node. 0 -> ctrl node.
// (2) has_ctrl true: get_ctrl_no_update returns ctrl of data node.
// (3) has_ctrl false: get_loop_idx returns IdealLoopTree for ctrl node.
// Returns true if the node passes all enabled checks.
bool PhaseIdealLoop::verify_loop_ctrl(Node* n, const PhaseIdealLoop* phase_verify) const {
  const uint i = n->_idx;
  // The loop-tree was built from def to use (top-down).
  // The verification happens from use to def (bottom-up).
  // We may thus find nodes during verification that are not in the loop-tree.
  if (_loop_or_ctrl[i] == nullptr || phase_verify->_loop_or_ctrl[i] == nullptr) {
    if (_loop_or_ctrl[i] != nullptr || phase_verify->_loop_or_ctrl[i] != nullptr) {
      tty->print_cr("Was reachable in only one. this %d, verify %d.",
                 _loop_or_ctrl[i] != nullptr, phase_verify->_loop_or_ctrl[i] != nullptr);
      n->dump();
      return false; // fail
    }
    // Not reachable for both.
    return true; // pass
  }

  // A node is either a CFG node (loop membership recorded) or a data node
  // (ctrl recorded) — exactly one of the two.
  if (n->is_CFG() == has_ctrl(n)) {
    tty->print_cr("Exactly one should be true: %d for is_CFG, %d for has_ctrl.", n->is_CFG(), has_ctrl(n));
    n->dump();
    return false; // fail
  }

  if (has_ctrl(n) != phase_verify->has_ctrl(n)) {
    tty->print_cr("Mismatch has_ctrl: %d for this, %d for verify.", has_ctrl(n), phase_verify->has_ctrl(n));
    n->dump();
    return false; // fail
  } else if (has_ctrl(n)) {
    assert(phase_verify->has_ctrl(n), "sanity");
    // n is a data node.
    // Verify that its ctrl is the same.

    // Broken part of VerifyLoopOptimizations (A)
    // Reason:
    //   BUG, wrong control set for example in
    //   PhaseIdealLoop::split_if_with_blocks
    //   at "set_ctrl(x, new_ctrl);"
    /*
    if( _loop_or_ctrl[i] != loop_verify->_loop_or_ctrl[i] &&
        get_ctrl_no_update(n) != loop_verify->get_ctrl_no_update(n) ) {
      tty->print("Mismatched control setting for: ");
      n->dump();
      if( fail++ > 10 ) return;
      Node *c = get_ctrl_no_update(n);
      tty->print("We have it as: ");
      if( c->in(0) ) c->dump();
        else tty->print_cr("N%d",c->_idx);
      tty->print("Verify thinks: ");
      if( loop_verify->has_ctrl(n) )
        loop_verify->get_ctrl_no_update(n)->dump();
      else
        loop_verify->get_loop_idx(n)->dump();
      tty->cr();
    }
    */
    return true; // pass
  } else {
    assert(!phase_verify->has_ctrl(n), "sanity");
    // n is a ctrl node.
    // Verify that not has_ctrl, and that get_loop_idx is the same.

    // Broken part of VerifyLoopOptimizations (B)
    // Reason:
    //   NeverBranch node for example is added to loop outside its scope.
    //   Once we run build_loop_tree again, it is added to the correct loop.
    /*
    if (!C->major_progress()) {
      // Loop selection can be messed up if we did a major progress
      // operation, like split-if.  Do not verify in that case.
      IdealLoopTree *us = get_loop_idx(n);
      IdealLoopTree *them = loop_verify->get_loop_idx(n);
      if( us->_head != them->_head ||  us->_tail != them->_tail ) {
        tty->print("Unequals loops for: ");
        n->dump();
        if( fail++ > 10 ) return;
        tty->print("We have it as: ");
        us->dump();
        tty->print("Verify thinks: ");
        them->dump();
        tty->cr();
      }
    }
    */
    return true; // pass
  }
}
5477
5478 static int compare_tree(IdealLoopTree* const& a, IdealLoopTree* const& b) {
5479 assert(a != nullptr && b != nullptr, "must be");
5480 return a->_head->_idx - b->_head->_idx;
5481 }
5482
5483 GrowableArray<IdealLoopTree*> IdealLoopTree::collect_sorted_children() const {
5484 GrowableArray<IdealLoopTree*> children;
5485 IdealLoopTree* child = _child;
5486 while (child != nullptr) {
5487 assert(child->_parent == this, "all must be children of this");
5488 children.insert_sorted<compare_tree>(child);
5489 child = child->_next;
5490 }
5491 return children;
5492 }
5493
// Verify that tree structures match. Because the CFG can change, siblings
// within the loop tree can be reordered. We attempt to deal with that by
// reordering the verify's loop tree if possible.
// Returns true if this subtree and loop_verify's subtree agree (up to the
// tolerated differences for irreducible and infinite loops).
bool IdealLoopTree::verify_tree(IdealLoopTree* loop_verify) const {
  assert(_head == loop_verify->_head, "mismatched loop head");
  assert(this->_parent != nullptr || this->_next == nullptr, "is_root_loop implies has_no_sibling");

  // Collect the children, sorted by header node index, so the two sibling
  // lists can be compared with a linear merge-walk even if the CFG changes
  // reordered them.
  GrowableArray<IdealLoopTree*> children = collect_sorted_children();
  GrowableArray<IdealLoopTree*> children_verify = loop_verify->collect_sorted_children();

  bool success = true;

  // Compare the two children lists (merge-walk over two sorted lists).
  for (int i = 0, j = 0; i < children.length() || j < children_verify.length(); ) {
    IdealLoopTree* child = nullptr;
    IdealLoopTree* child_verify = nullptr;
    // Read from both lists, if possible.
    if (i < children.length()) {
      child = children.at(i);
    }
    if (j < children_verify.length()) {
      child_verify = children_verify.at(j);
    }
    assert(child != nullptr || child_verify != nullptr, "must find at least one");
    if (child != nullptr && child_verify != nullptr && child->_head != child_verify->_head) {
      // We found two non-equal children. Select the smaller one.
      if (child->_head->_idx < child_verify->_head->_idx) {
        child_verify = nullptr;
      } else {
        child = nullptr;
      }
    }
    // Process the two children, or potentially log the failure if we only found one.
    if (child_verify == nullptr) {
      if (child->_irreducible && Compile::current()->major_progress()) {
        // Irreducible loops can pick a different header (one of its entries).
      } else {
        tty->print_cr("We have a loop that verify does not have");
        child->dump();
        success = false;
      }
      i++; // step for this
    } else if (child == nullptr) {
      if (child_verify->_irreducible && Compile::current()->major_progress()) {
        // Irreducible loops can pick a different header (one of its entries).
      } else if (child_verify->_head->as_Region()->is_in_infinite_subgraph()) {
        // Infinite loops do not get attached to the loop-tree on their first visit.
        // "this" runs before "loop_verify". It is thus possible that we find the
        // infinite loop only for "child_verify". Only finding it with "child" would
        // mean that we lost it, which is not ok.
      } else {
        tty->print_cr("Verify has a loop that we do not have");
        child_verify->dump();
        success = false;
      }
      j++; // step for verify
    } else {
      assert(child->_head == child_verify->_head, "We have both and they are equal");
      success &= child->verify_tree(child_verify); // Recursion
      i++; // step for this
      j++; // step for verify
    }
  }

  // Broken part of VerifyLoopOptimizations (D)
  // Reason:
  //   split_if has to update the _tail, if it is modified. But that is done by
  //   checking to what loop the iff belongs to. That info can be wrong, and then
  //   we do not update the _tail correctly.
  /*
  Node *tail = _tail;      // Inline a non-updating version of
  while( !tail->in(0) )    // the 'tail()' call.
    tail = tail->in(1);
  assert( tail == loop->_tail, "mismatched loop tail" );
  */

  if (_head->is_CountedLoop()) {
    // Sanity-check the structure of a counted loop: both control inputs
    // must be present CFG nodes; loopexit() asserts internally.
    CountedLoopNode *cl = _head->as_CountedLoop();

    Node* ctrl = cl->init_control();
    Node* back = cl->back_control();
    assert(ctrl != nullptr && ctrl->is_CFG(), "sane loop in-ctrl");
    assert(back != nullptr && back->is_CFG(), "sane loop backedge");
    cl->loopexit(); // assert implied
  }

  // Broken part of VerifyLoopOptimizations (E)
  // Reason:
  //   PhaseIdealLoop::split_thru_region creates new nodes for loop that are not added
  //   to the loop body. Or maybe they are not added to the correct loop.
  //   at "Node* x = n->clone();"
  /*
  // Innermost loops need to verify loop bodies,
  // but only if no 'major_progress'
  int fail = 0;
  if (!Compile::current()->major_progress() && _child == nullptr) {
    for( uint i = 0; i < _body.size(); i++ ) {
      Node *n = _body.at(i);
      if (n->outcnt() == 0)  continue; // Ignore dead
      uint j;
      for( j = 0; j < loop->_body.size(); j++ )
        if( loop->_body.at(j) == n )
          break;
      if( j == loop->_body.size() ) { // Not found in loop body
        // Last ditch effort to avoid assertion: Its possible that we
        // have some users (so outcnt not zero) but are still dead.
        // Try to find from root.
        if (Compile::current()->root()->find(n->_idx)) {
          fail++;
          tty->print("We have that verify does not: ");
          n->dump();
        }
      }
    }
    for( uint i2 = 0; i2 < loop->_body.size(); i2++ ) {
      Node *n = loop->_body.at(i2);
      if (n->outcnt() == 0)  continue; // Ignore dead
      uint j;
      for( j = 0; j < _body.size(); j++ )
        if( _body.at(j) == n )
          break;
      if( j == _body.size() ) { // Not found in loop body
        // Last ditch effort to avoid assertion: Its possible that we
        // have some users (so outcnt not zero) but are still dead.
        // Try to find from root.
        if (Compile::current()->root()->find(n->_idx)) {
          fail++;
          tty->print("Verify has that we do not: ");
          n->dump();
        }
      }
    }
    assert( !fail, "loop body mismatch" );
  }
  */
  return success;
}
5632 #endif
5633
5634 //------------------------------set_idom---------------------------------------
5635 void PhaseIdealLoop::set_idom(Node* d, Node* n, uint dom_depth) {
5636 _nesting.check(); // Check if a potential reallocation in the resource arena is safe
5637 uint idx = d->_idx;
5638 if (idx >= _idom_size) {
5639 uint newsize = next_power_of_2(idx);
5640 _idom = REALLOC_RESOURCE_ARRAY( Node*, _idom,_idom_size,newsize);
5641 _dom_depth = REALLOC_RESOURCE_ARRAY( uint, _dom_depth,_idom_size,newsize);
5642 memset( _dom_depth + _idom_size, 0, (newsize - _idom_size) * sizeof(uint) );
5643 _idom_size = newsize;
5644 }
5645 _idom[idx] = n;
5646 _dom_depth[idx] = dom_depth;
5647 }
5648
//------------------------------recompute_dom_depth---------------------------------------
// The dominator tree is constructed with only parent pointers.
// This recomputes the depth in the tree by first tagging all
// nodes as "no depth yet" marker. The next pass then runs up
// the dom tree from each node marked "no depth yet", and computes
// the depth on the way back down.
void PhaseIdealLoop::recompute_dom_depth() {
  // C->unique() is one past the largest node index, so it can never collide
  // with a real depth value and serves as the "not computed yet" marker.
  uint no_depth_marker = C->unique();
  uint i;
  // Initialize depth to "no depth yet" and realize all lazy updates
  for (i = 0; i < _idom_size; i++) {
    // Only indices with a _dom_depth has a Node* or null (otherwise uninitialized).
    if (_dom_depth[i] > 0 && _idom[i] != nullptr) {
      _dom_depth[i] = no_depth_marker;

      // heal _idom if it has a fwd mapping in _loop_or_ctrl
      if (_idom[i]->in(0) == nullptr) {
        idom(i);
      }
    }
  }
  if (_dom_stk == nullptr) {
    uint init_size = C->live_nodes() / 100; // Guess that 1/100 is a reasonable initial size.
    if (init_size < 10) init_size = 10;
    _dom_stk = new GrowableArray<uint>(init_size);
  }
  // Compute new depth for each node.
  for (i = 0; i < _idom_size; i++) {
    uint j = i;
    // Run up the dom tree to find a node with a depth, stacking the
    // unmarked nodes visited along the way.
    while (_dom_depth[j] == no_depth_marker) {
      _dom_stk->push(j);
      j = _idom[j]->_idx;
    }
    // Compute the depth on the way back down this tree branch: each popped
    // node is one level deeper than the node it was reached from.
    uint dd = _dom_depth[j] + 1;
    while (_dom_stk->length() > 0) {
      uint j = _dom_stk->pop();
      _dom_depth[j] = dd;
      dd++;
    }
  }
}
5692
//------------------------------sort-------------------------------------------
// Insert 'loop' into the existing loop tree. 'innermost' is a leaf of the
// loop tree, not the root. The chain from 'innermost' through the _parent
// pointers is kept ordered by descending header pre-order number; this is
// effectively an insertion sort on that chain. Returns the (possibly new)
// innermost loop.
IdealLoopTree *PhaseIdealLoop::sort( IdealLoopTree *loop, IdealLoopTree *innermost ) {
  if( !innermost ) return loop; // New innermost loop

  int loop_preorder = get_preorder(loop->_head); // Cache pre-order number
  assert( loop_preorder, "not yet post-walked loop" );
  IdealLoopTree **pp = &innermost;      // Pointer to previous next-pointer
  IdealLoopTree *l = *pp;               // Do I go before or after 'l'?

  // Insert at start of list
  while( l ) {                  // Insertion sort based on pre-order
    if( l == loop ) return innermost; // Already on list!
    int l_preorder = get_preorder(l->_head); // Cache pre-order number
    assert( l_preorder, "not yet post-walked l" );
    // Check header pre-order number to figure proper nesting
    if( loop_preorder > l_preorder )
      break;                    // End of insertion
    // If headers tie (e.g., shared headers) check tail pre-order numbers.
    // Since I split shared headers, you'd think this could not happen.
    // BUT: I must first do the preorder numbering before I can discover I
    // have shared headers, so the split headers all get the same preorder
    // number as the RegionNode they split from.
    if( loop_preorder == l_preorder &&
        get_preorder(loop->_tail) < get_preorder(l->_tail) )
      break;                    // Also check for shared headers (same pre#)
    pp = &l->_parent;           // Chain up list
    l = *pp;
  }
  // Link into list
  // Point predecessor to me
  *pp = loop;
  // Point me to successor
  IdealLoopTree *p = loop->_parent;
  loop->_parent = l;            // Point me to successor
  // My old parent was displaced by the splice above; re-insert it (and,
  // recursively, its ancestors) so the chain stays complete and sorted.
  if( p ) sort( p, innermost ); // Insert my parents into list as well
  return innermost;
}
5732
5733 //------------------------------build_loop_tree--------------------------------
5734 // I use a modified Vick/Tarjan algorithm. I need pre- and a post- visit
5735 // bits. The _loop_or_ctrl[] array is mapped by Node index and holds a null for
5736 // not-yet-pre-walked, pre-order # for pre-but-not-post-walked and holds the
5737 // tightest enclosing IdealLoopTree for post-walked.
5738 //
5739 // During my forward walk I do a short 1-layer lookahead to see if I can find
5740 // a loop backedge with that doesn't have any work on the backedge. This
5741 // helps me construct nested loops with shared headers better.
5742 //
5743 // Once I've done the forward recursion, I do the post-work. For each child
5744 // I check to see if there is a backedge. Backedges define a loop! I
5745 // insert an IdealLoopTree at the target of the backedge.
5746 //
5747 // During the post-work I also check to see if I have several children
5748 // belonging to different loops. If so, then this Node is a decision point
5749 // where control flow can choose to change loop nests. It is at this
5750 // decision point where I can figure out how loops are nested. At this
5751 // time I can properly order the different loop nests from my children.
5752 // Note that there may not be any backedges at the decision point!
5753 //
5754 // Since the decision point can be far removed from the backedges, I can't
5755 // order my loops at the time I discover them. Thus at the decision point
5756 // I need to inspect loop header pre-order numbers to properly nest my
5757 // loops. This means I need to sort my childrens' loops by pre-order.
5758 // The sort is of size number-of-control-children, which generally limits
5759 // it to size 2 (i.e., I just choose between my 2 target loops).
// Non-recursive DFS driver over the CFG. Nodes cycle through three states:
// unvisited -> pre-visited (given a pre-order number) -> post-visited
// (handed to build_loop_tree_impl). An explicit stack replaces recursion.
void PhaseIdealLoop::build_loop_tree() {
  // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
  GrowableArray <Node *> bltstack(C->live_nodes() >> 1);
  Node *n = C->root();
  bltstack.push(n);
  int pre_order = 1;
  int stack_size;

  while ( ( stack_size = bltstack.length() ) != 0 ) {
    n = bltstack.top(); // Leave node on stack
    if ( !is_visited(n) ) {
      // ---- Pre-pass Work ----
      // Pre-walked but not post-walked nodes need a pre_order number.

      set_preorder_visited( n, pre_order ); // set as visited

      // ---- Scan over children ----
      // Scan first over control projections that lead to loop headers.
      // This helps us find inner-to-outer loops with shared headers better.

      // Scan children's children for loop headers.
      for ( int i = n->outcnt() - 1; i >= 0; --i ) {
        Node* m = n->raw_out(i);       // Child
        if( m->is_CFG() && !is_visited(m) ) { // Only for CFG children
          // Scan over children's children to find loop: a child whose own
          // successor is pre- but not post-visited closes a backedge.
          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
            Node* l = m->fast_out(j);
            if( is_visited(l) &&       // Been visited?
                !is_postvisited(l) &&  // But not post-visited
                get_preorder(l) < pre_order ) { // And smaller pre-order
              // Found!  Scan the DFS down this path before doing other paths
              bltstack.push(m);
              break;
            }
          }
        }
      }
      pre_order++;
    }
    else if ( !is_postvisited(n) ) {
      // Note: build_loop_tree_impl() adds out edges on rare occasions,
      // such as com.sun.rsasign.am::a.
      // For non-recursive version, first, process current children.
      // On next iteration, check if additional children were added.
      for ( int k = n->outcnt() - 1; k >= 0; --k ) {
        Node* u = n->raw_out(k);
        if ( u->is_CFG() && !is_visited(u) ) {
          bltstack.push(u);
        }
      }
      if ( bltstack.length() == stack_size ) {
        // There were no additional children, post visit node now
        (void)bltstack.pop(); // Remove node from stack
        pre_order = build_loop_tree_impl(n, pre_order);
        // Check for bailout
        if (C->failing()) {
          return;
        }
        // Check to grow _preorders[] array for the case when
        // build_loop_tree_impl() adds new nodes.
        check_grow_preorders();
      }
    }
    else {
      (void)bltstack.pop(); // Remove post-visited node from stack
    }
  }
  DEBUG_ONLY(verify_regions_in_irreducible_loops();)
}
5829
//------------------------------build_loop_tree_impl---------------------------
// Post-visit work for one CFG node 'n' in the DFS of build_loop_tree():
// discover backedges into 'n' (each one defines a loop), splice the resulting
// IdealLoopTrees into the tree, mark irreducible loops, and record per-loop
// flags (_has_call, _allow_optimizations, safepoints). Returns the (possibly
// updated) pre_order counter; may bail out the compile on unhandled CFGs.
int PhaseIdealLoop::build_loop_tree_impl(Node* n, int pre_order) {
  // ---- Post-pass Work ----
  // Pre-walked but not post-walked nodes need a pre_order number.

  // Tightest enclosing loop for this Node
  IdealLoopTree *innermost = nullptr;

  // For all children, see if any edge is a backedge. If so, make a loop
  // for it. Then find the tightest enclosing loop for the self Node.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* m = n->fast_out(i);   // Child
    if (n == m) continue;       // Ignore control self-cycles
    if (!m->is_CFG()) continue; // Ignore non-CFG edges

    IdealLoopTree *l;           // Child's loop
    if (!is_postvisited(m)) {   // Child visited but not post-visited?
      // Found a backedge
      assert(get_preorder(m) < pre_order, "should be backedge");
      // Check for the RootNode, which is already a LoopNode and is allowed
      // to have multiple "backedges".
      if (m == C->root()) {     // Found the root?
        l = _ltree_root;        // Root is the outermost LoopNode
      } else {                  // Else found a nested loop
        // Insert a LoopNode to mark this loop.
        l = new IdealLoopTree(this, m, n);
      } // End of Else found a nested loop
      if (!has_loop(m)) {       // If 'm' does not already have a loop set
        set_loop(m, l);         // Set loop header to loop now
      }
    } else {                    // Else not a nested loop
      if (!_loop_or_ctrl[m->_idx]) continue; // Dead code has no loop
      IdealLoopTree* m_loop = get_loop(m);
      l = m_loop;               // Get previously determined loop
      // If successor is header of a loop (nest), move up-loop till it
      // is a member of some outer enclosing loop. Since there are no
      // shared headers (I've split them already) I only need to go up
      // at most 1 level.
      while (l && l->_head == m) { // Successor heads loop?
        l = l->_parent;            // Move up 1 for me
      }
      // If this loop is not properly parented, then this loop
      // has no exit path out, i.e. its an infinite loop.
      if (!l) {
        // Make loop "reachable" from root so the CFG is reachable. Basically
        // insert a bogus loop exit that is never taken. 'm', the loop head,
        // points to 'n', one (of possibly many) fall-in paths. There may be
        // many backedges as well.

        if (!_verify_only) {
          // Insert the NeverBranch between 'm' and it's control user.
          NeverBranchNode *iff = new NeverBranchNode( m );
          _igvn.register_new_node_with_optimizer(iff);
          set_loop(iff, m_loop);
          Node *if_t = new CProjNode( iff, 0 );
          _igvn.register_new_node_with_optimizer(if_t);
          set_loop(if_t, m_loop);

          Node* cfg = nullptr;   // Find the One True Control User of m
          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
            Node* x = m->fast_out(j);
            if (x->is_CFG() && x != m && x != iff)
              { cfg = x; break; }
          }
          assert(cfg != nullptr, "must find the control user of m");
          uint k = 0;            // Probably cfg->in(0)
          while( cfg->in(k) != m ) k++; // But check in case cfg is a Region
          _igvn.replace_input_of(cfg, k, if_t); // Now point to NeverBranch

          // Now create the never-taken loop exit
          Node *if_f = new CProjNode( iff, 1 );
          _igvn.register_new_node_with_optimizer(if_f);
          set_loop(if_f, _ltree_root);
          // Find frame ptr for Halt. Relies on the optimizer
          // V-N'ing. Easier and quicker than searching through
          // the program structure.
          Node *frame = new ParmNode( C->start(), TypeFunc::FramePtr );
          _igvn.register_new_node_with_optimizer(frame);
          // Halt & Catch Fire
          Node* halt = new HaltNode(if_f, frame, "never-taken loop exit reached");
          _igvn.register_new_node_with_optimizer(halt);
          set_loop(halt, _ltree_root);
          _igvn.add_input_to(C->root(), halt);
        }
        set_loop(C->root(), _ltree_root);
        // move to outer most loop with same header
        l = m_loop;
        while (true) {
          IdealLoopTree* next = l->_parent;
          if (next == nullptr || next->_head != m) {
            break;
          }
          l = next;
        }
        // properly insert infinite loop in loop tree
        sort(_ltree_root, l);
        // fix child link from parent
        IdealLoopTree* p = l->_parent;
        l->_next = p->_child;
        p->_child = l;
        // code below needs enclosing loop
        l = l->_parent;
      }
    }
    if (is_postvisited(l->_head)) {
      // We are currently visiting l, but its head has already been post-visited.
      // l is irreducible: we just found a second entry m.
      _has_irreducible_loops = true;
      RegionNode* secondary_entry = m->as_Region();

      if (!secondary_entry->can_be_irreducible_entry()) {
        assert(!VerifyNoNewIrreducibleLoops, "A new irreducible loop was created after parsing.");
        C->record_method_not_compilable("A new irreducible loop was created after parsing.");
        return pre_order;
      }

      // Walk up the loop-tree, mark all loops that are already post-visited as irreducible
      // Since m is a secondary entry to them all.
      while( is_postvisited(l->_head) ) {
        l->_irreducible = 1; // = true
        RegionNode* head = l->_head->as_Region();
        if (!head->can_be_irreducible_entry()) {
          assert(!VerifyNoNewIrreducibleLoops, "A new irreducible loop was created after parsing.");
          C->record_method_not_compilable("A new irreducible loop was created after parsing.");
          return pre_order;
        }
        l = l->_parent;
        // Check for bad CFG here to prevent crash, and bailout of compile
        if (l == nullptr) {
#ifndef PRODUCT
          if (TraceLoopOpts) {
            tty->print_cr("bailout: unhandled CFG: infinite irreducible loop");
            m->dump();
          }
#endif
          // This is a rare case that we do not want to handle in C2.
          C->record_method_not_compilable("unhandled CFG detected during loop optimization");
          return pre_order;
        }
      }
    }
    if (!_verify_only) {
      C->set_has_irreducible_loop(_has_irreducible_loops);
    }

    // This Node might be a decision point for loops. It is only if
    // it's children belong to several different loops. The sort call
    // does a trivial amount of work if there is only 1 child or all
    // children belong to the same loop. If however, the children
    // belong to different loops, the sort call will properly set the
    // _parent pointers to show how the loops nest.
    //
    // In any case, it returns the tightest enclosing loop.
    innermost = sort( l, innermost );
  }

  // Def-use info will have some dead stuff; dead stuff will have no
  // loop decided on.

  // Am I a loop header? If so fix up my parent's child and next ptrs.
  if( innermost && innermost->_head == n ) {
    assert( get_loop(n) == innermost, "" );
    IdealLoopTree *p = innermost->_parent;
    IdealLoopTree *l = innermost;
    // Hook every loop headed by 'n' into its parent's child list.
    while (p && l->_head == n) {
      l->_next = p->_child; // Put self on parents 'next child'
      p->_child = l;        // Make self as first child of parent
      l = p;                // Now walk up the parent chain
      p = l->_parent;
    }
  } else {
    // Note that it is possible for a LoopNode to reach here, if the
    // backedge has been made unreachable (hence the LoopNode no longer
    // denotes a Loop, and will eventually be removed).

    // Record tightest enclosing loop for self. Mark as post-visited.
    set_loop(n, innermost);
    // Also record has_call flag early on
    if (innermost) {
      if( n->is_Call() && !n->is_CallLeaf() && !n->is_macro() ) {
        // Do not count uncommon calls
        if( !n->is_CallStaticJava() || !n->as_CallStaticJava()->_name ) {
          Node *iff = n->in(0)->in(0);
          // No any calls for vectorized loops.
          if (C->do_superword() ||
              !iff->is_If() ||
              (n->in(0)->Opcode() == Op_IfFalse && (1.0 - iff->as_If()->_prob) >= 0.01) ||
              iff->as_If()->_prob >= 0.01) {
            innermost->_has_call = 1;
          }
        }
      } else if( n->is_Allocate() && n->as_Allocate()->_is_scalar_replaceable ) {
        // Disable loop optimizations if the loop has a scalar replaceable
        // allocation. This disabling may cause a potential performance lost
        // if the allocation is not eliminated for some reason.
        innermost->_allow_optimizations = false;
        innermost->_has_call = 1; // = true
      } else if (n->Opcode() == Op_SafePoint) {
        // Record all safepoints in this loop.
        if (innermost->_safepts == nullptr) innermost->_safepts = new Node_List();
        innermost->_safepts->push(n);
      }
    }
  }

  // Flag as post-visited now
  set_postvisited(n);
  return pre_order;
}
6039
6040 #ifdef ASSERT
//--------------------------verify_regions_in_irreducible_loops----------------
// Iterate down from Root through CFG, verify for every region:
// if it is in an irreducible loop it must be marked as such
void PhaseIdealLoop::verify_regions_in_irreducible_loops() {
  ResourceMark rm;
  if (!_has_irreducible_loops) {
    // last build_loop_tree has not found any irreducible loops
    // hence no region has to be marked is_in_irreducible_loop
    return;
  }

  // Breadth-first walk of the CFG starting at root; Unique_Node_List
  // guarantees each node is visited at most once.
  RootNode* root = C->root();
  Unique_Node_List worklist; // visit all nodes once
  worklist.push(root);
  bool failure = false;
  for (uint i = 0; i < worklist.size(); i++) {
    Node* n = worklist.at(i);
    if (n->is_Region()) {
      RegionNode* region = n->as_Region();
      // A region inside an irreducible loop must not carry the Reducible status.
      if (is_in_irreducible_loop(region) &&
          region->loop_status() == RegionNode::LoopStatus::Reducible) {
        failure = true;
        tty->print("irreducible! ");
        region->dump();
      }
    }
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* use = n->fast_out(j);
      if (use->is_CFG()) {
        worklist.push(use); // push if was not pushed before
      }
    }
  }
  assert(!failure, "region in irreducible loop was marked as reducible");
}
6076
//---------------------------is_in_irreducible_loop-------------------------
// Analogous to ciTypeFlow::Block::is_in_irreducible_loop
// Returns true iff 'region' lies inside some loop marked irreducible.
bool PhaseIdealLoop::is_in_irreducible_loop(RegionNode* region) {
  if (!_has_irreducible_loops) {
    return false; // no irreducible loop in graph
  }
  IdealLoopTree* l = get_loop(region); // l: innermost loop that contains region
  // Walk outward through the loop tree looking for an irreducible ancestor.
  do {
    if (l->_irreducible) {
      return true; // found it
    }
    if (l == _ltree_root) {
      return false; // reached root, terminate
    }
    l = l->_parent;
  } while (l != nullptr);
  assert(region->is_in_infinite_subgraph(), "must be in infinite subgraph");
  // We have "l->_parent == nullptr", which happens only for infinite loops,
  // where no parent is attached to the loop. We did not find any irreducible
  // loop from this block out to lp. Thus lp only has one entry, and no exit
  // (it is infinite and reducible). We can always rewrite an infinite loop
  // that is nested inside other loops:
  //   while(condition) { infinite_loop; }
  // with an equivalent program where the infinite loop is an outermost loop
  // that is not nested in any loop:
  //   while(condition) { break; } infinite_loop;
  // Thus, we can understand lp as an outermost loop, and can terminate and
  // conclude: this block is in no irreducible loop.
  return false;
}
6107 #endif
6108
//------------------------------build_loop_early-------------------------------
// Put Data nodes into some loop nest, by setting the _loop_or_ctrl[]->loop mapping.
// First pass computes the earliest controlling node possible. This is the
// controlling input with the deepest dominating depth.
// Implemented as an explicit-stack post-order DFS over node inputs, seeded
// from 'worklist'; 'nstack' holds the in-flight (parent, next-input) frames.
void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) {
  while (worklist.size() != 0) {
    // Use local variables nstack_top_n & nstack_top_i to cache values
    // on nstack's top.
    Node *nstack_top_n = worklist.pop();
    uint  nstack_top_i = 0;
//while_nstack_nonempty:
    while (true) {
      // Get parent node and next input's index from stack's top.
      Node  *n = nstack_top_n;
      uint   i = nstack_top_i;
      uint cnt = n->req(); // Count of inputs
      if (i == 0) {        // Pre-process the node.
        if( has_node(n) &&            // Have either loop or control already?
            !has_ctrl(n) ) {          // Have loop picked out already?
          // During "merge_many_backedges" we fold up several nested loops
          // into a single loop. This makes the members of the original
          // loop bodies pointing to dead loops; they need to move up
          // to the new UNION'd larger loop. I set the _head field of these
          // dead loops to null and the _parent field points to the owning
          // loop. Shades of UNION-FIND algorithm.
          IdealLoopTree *ilt;
          while( !(ilt = get_loop(n))->_head ) {
            // Normally I would use a set_loop here. But in this one special
            // case, it is legal (and expected) to change what loop a Node
            // belongs to.
            _loop_or_ctrl.map(n->_idx, (Node*)(ilt->_parent));
          }
          // Remove safepoints ONLY if I've already seen I don't need one.
          // (the old code here would yank a 2nd safepoint after seeing a
          // first one, even though the 1st did not dominate in the loop body
          // and thus could be avoided indefinitely)
          if( !_verify_only && !_verify_me && ilt->_has_sfpt && n->Opcode() == Op_SafePoint &&
              is_deleteable_safept(n)) {
            Node *in = n->in(TypeFunc::Control);
            lazy_replace(n,in);       // Pull safepoint now
            if (ilt->_safepts != nullptr) {
              ilt->_safepts->yank(n);
            }
            // Carry on with the recursion "as if" we are walking
            // only the control input
            if( !visited.test_set( in->_idx ) ) {
              worklist.push(in);      // Visit this guy later, using worklist
            }
            // Get next node from nstack:
            // - skip n's inputs processing by setting i > cnt;
            // - we also will not call set_early_ctrl(n) since
            //   has_node(n) == true (see the condition above).
            i = cnt + 1;
          }
        }
      } // if (i == 0)

      // Visit all inputs
      bool done = true;       // Assume all n's inputs will be processed
      while (i < cnt) {
        Node *in = n->in(i);
        ++i;
        if (in == nullptr) continue;
        // Pinned non-CFG nodes are controlled by their control input.
        if (in->pinned() && !in->is_CFG())
          set_ctrl(in, in->in(0));
        int is_visited = visited.test_set( in->_idx );
        if (!has_node(in)) {  // No controlling input yet?
          assert( !in->is_CFG(), "CFG Node with no controlling input?" );
          assert( !is_visited, "visit only once" );
          nstack.push(n, i);  // Save parent node and next input's index.
          nstack_top_n = in;  // Process current input now.
          nstack_top_i = 0;
          done = false;       // Not all n's inputs processed.
          break; // continue while_nstack_nonempty;
        } else if (!is_visited) {
          // This guy has a location picked out for him, but has not yet
          // been visited. Happens to all CFG nodes, for instance.
          // Visit him using the worklist instead of recursion, to break
          // cycles. Since he has a location already we do not need to
          // find his location before proceeding with the current Node.
          worklist.push(in);  // Visit this guy later, using worklist
        }
      }
      if (done) {
        // All of n's inputs have been processed, complete post-processing.

        // Compute earliest point this Node can go.
        // CFG, Phi, pinned nodes already know their controlling input.
        if (!has_node(n)) {
          // Record earliest legal location
          set_early_ctrl(n, false);
        }
        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist.
          break;
        }
        // Get saved parent node and next input's index.
        nstack_top_n = nstack.node();
        nstack_top_i = nstack.index();
        nstack.pop();
      }
    } // while (true)
  }
}
6214
6215 //------------------------------dom_lca_internal--------------------------------
6216 // Pair-wise LCA
6217 Node *PhaseIdealLoop::dom_lca_internal( Node *n1, Node *n2 ) const {
6218 if( !n1 ) return n2; // Handle null original LCA
6219 assert( n1->is_CFG(), "" );
6220 assert( n2->is_CFG(), "" );
6221 // find LCA of all uses
6222 uint d1 = dom_depth(n1);
6223 uint d2 = dom_depth(n2);
6224 while (n1 != n2) {
6225 if (d1 > d2) {
6226 n1 = idom(n1);
6227 d1 = dom_depth(n1);
6228 } else if (d1 < d2) {
6229 n2 = idom(n2);
6230 d2 = dom_depth(n2);
6231 } else {
6232 // Here d1 == d2. Due to edits of the dominator-tree, sections
6233 // of the tree might have the same depth. These sections have
6234 // to be searched more carefully.
6235
6236 // Scan up all the n1's with equal depth, looking for n2.
6237 Node *t1 = idom(n1);
6238 while (dom_depth(t1) == d1) {
6239 if (t1 == n2) return n2;
6240 t1 = idom(t1);
6241 }
6242 // Scan up all the n2's with equal depth, looking for n1.
6243 Node *t2 = idom(n2);
6244 while (dom_depth(t2) == d2) {
6245 if (t2 == n1) return n1;
6246 t2 = idom(t2);
6247 }
6248 // Move up to a new dominator-depth value as well as up the dom-tree.
6249 n1 = t1;
6250 n2 = t2;
6251 d1 = dom_depth(n1);
6252 d2 = dom_depth(n2);
6253 }
6254 }
6255 return n1;
6256 }
6257
6258 //------------------------------compute_idom-----------------------------------
6259 // Locally compute IDOM using dom_lca call. Correct only if the incoming
6260 // IDOMs are correct.
6261 Node *PhaseIdealLoop::compute_idom( Node *region ) const {
6262 assert( region->is_Region(), "" );
6263 Node *LCA = nullptr;
6264 for( uint i = 1; i < region->req(); i++ ) {
6265 if( region->in(i) != C->top() )
6266 LCA = dom_lca( LCA, region->in(i) );
6267 }
6268 return LCA;
6269 }
6270
6271 bool PhaseIdealLoop::verify_dominance(Node* n, Node* use, Node* LCA, Node* early) {
6272 bool had_error = false;
6273 #ifdef ASSERT
6274 if (early != C->root()) {
6275 // Make sure that there's a dominance path from LCA to early
6276 Node* d = LCA;
6277 while (d != early) {
6278 if (d == C->root()) {
6279 dump_bad_graph("Bad graph detected in compute_lca_of_uses", n, early, LCA);
6280 tty->print_cr("*** Use %d isn't dominated by def %d ***", use->_idx, n->_idx);
6281 had_error = true;
6282 break;
6283 }
6284 d = idom(d);
6285 }
6286 }
6287 #endif
6288 return had_error;
6289 }
6290
6291
// Compute the least common ancestor (in the dominator tree) of all uses of
// 'n'. Used to find the latest legal placement of 'n'. Stops early once the
// LCA has climbed all the way to 'early'. When 'verify' is set, each partial
// LCA is checked for dominance by 'early' (ASSERT builds).
Node* PhaseIdealLoop::compute_lca_of_uses(Node* n, Node* early, bool verify) {
  // Compute LCA over list of uses
  bool had_error = false;
  Node *LCA = nullptr;
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && LCA != early; i++) {
    Node* c = n->fast_out(i);
    if (_loop_or_ctrl[c->_idx] == nullptr)
      continue;                 // Skip the occasional dead node
    if( c->is_Phi() ) {         // For Phis, we must land above on the path
      // A Phi uses 'n' at the end of the corresponding control predecessor,
      // not at the Phi's own Region; use that predecessor block instead.
      for( uint j=1; j<c->req(); j++ ) {// For all inputs
        if( c->in(j) == n ) {   // Found matching input?
          Node *use = c->in(0)->in(j);
          if (_verify_only && use->is_top()) continue;
          LCA = dom_lca_for_get_late_ctrl( LCA, use, n );
          if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error;
        }
      }
    } else {
      // For CFG data-users, use is in the block just prior
      Node *use = has_ctrl(c) ? get_ctrl(c) : c->in(0);
      LCA = dom_lca_for_get_late_ctrl( LCA, use, n );
      if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error;
    }
  }
  assert(!had_error, "bad dominance");
  return LCA;
}
6319
// Check the shape of the graph at the loop entry. In some cases,
// the shape of the graph does not match the shape outlined below.
// That is caused by the Opaque1 node "protecting" the shape of
// the graph being removed by, for example, the IGVN performed
// in PhaseIdealLoop::build_and_optimize().
//
// After the Opaque1 node has been removed, optimizations (e.g., split-if,
// loop unswitching, and IGVN, or a combination of them) can freely change
// the graph's shape. As a result, the graph shape outlined below cannot
// be guaranteed anymore.
//
// Returns the OpaqueZeroTripGuard input of the zero-trip-guard Cmp if the
// canonical shape (IfProj -> If -> Bool -> Cmp -> Opaque) is found,
// nullptr otherwise.
Node* CountedLoopNode::is_canonical_loop_entry() {
  // Only main and post loops carry a zero trip guard.
  if (!is_main_loop() && !is_post_loop()) {
    return nullptr;
  }
  Node* ctrl = skip_assertion_predicates_with_halt();

  if (ctrl == nullptr || (!ctrl->is_IfTrue() && !ctrl->is_IfFalse())) {
    return nullptr;
  }
  Node* iffm = ctrl->in(0);
  if (iffm == nullptr || iffm->Opcode() != Op_If) {
    return nullptr;
  }
  Node* bolzm = iffm->in(1);
  if (bolzm == nullptr || !bolzm->is_Bool()) {
    return nullptr;
  }
  Node* cmpzm = bolzm->in(1);
  if (cmpzm == nullptr || !cmpzm->is_Cmp()) {
    return nullptr;
  }

  // Main loops expect the opaque node as input 2 of the Cmp, post loops as input 1.
  uint input = is_main_loop() ? 2 : 1;
  if (input >= cmpzm->req() || cmpzm->in(input) == nullptr) {
    return nullptr;
  }
  bool res = cmpzm->in(input)->Opcode() == Op_OpaqueZeroTripGuard;
#ifdef ASSERT
  // Cross-check: scanning all Cmp inputs for an Opaque1 must agree with the
  // positional test above.
  bool found_opaque = false;
  for (uint i = 1; i < cmpzm->req(); i++) {
    Node* opnd = cmpzm->in(i);
    if (opnd && opnd->is_Opaque1()) {
      found_opaque = true;
      break;
    }
  }
  assert(found_opaque == res, "wrong pattern");
#endif
  return res ? cmpzm->in(input) : nullptr;
}
6370
6371 // Find pre loop end from main loop. Returns nullptr if none.
6372 CountedLoopEndNode* CountedLoopNode::find_pre_loop_end() {
6373 assert(is_main_loop(), "Can only find pre-loop from main-loop");
6374 // The loop cannot be optimized if the graph shape at the loop entry is
6375 // inappropriate.
6376 if (is_canonical_loop_entry() == nullptr) {
6377 return nullptr;
6378 }
6379
6380 Node* p_f = skip_assertion_predicates_with_halt()->in(0)->in(0);
6381 if (!p_f->is_IfFalse() || !p_f->in(0)->is_CountedLoopEnd()) {
6382 return nullptr;
6383 }
6384 CountedLoopEndNode* pre_end = p_f->in(0)->as_CountedLoopEnd();
6385 CountedLoopNode* loop_node = pre_end->loopnode();
6386 if (loop_node == nullptr || !loop_node->is_pre_loop()) {
6387 return nullptr;
6388 }
6389 return pre_end;
6390 }
6391
// Returns the loop's init value, optionally looking through the CastII that
// pre/post/main loop creation inserts (see comment below for why).
Node* CountedLoopNode::uncasted_init_trip(bool uncast) {
  Node* init = init_trip();
  if (uncast && init->is_CastII()) {
    // skip over the cast added by PhaseIdealLoop::cast_incr_before_loop() when pre/post/main loops are created because
    // it can get in the way of type propagation. For instance, the index tested by an Assertion Predicate, if the cast
    // is not skipped over, could be (1):
    // (AddI (CastII (AddI pre_loop_iv -2) int) 1)
    // while without the cast, it is (2):
    // (AddI (AddI pre_loop_iv -2) 1)
    // which is be transformed to (3):
    // (AddI pre_loop_iv -1)
    // The compiler may be able to constant fold the Assertion Predicate condition for (3) but not (1)
    assert(init->as_CastII()->carry_dependency() && skip_assertion_predicates_with_halt() == init->in(0), "casted iv phi from pre loop expected");
    init = init->in(1);
  }
  return init;
}
6409
//------------------------------get_late_ctrl----------------------------------
// Compute latest legal control.
Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) {
  assert(early != nullptr, "early control should not be null");

  Node* LCA = compute_lca_of_uses(n, early);
#ifdef ASSERT
  if (LCA == C->root() && LCA != early) {
    // def doesn't dominate uses so print some useful debugging output
    // (re-run with verify=true purely for its diagnostics/asserts)
    compute_lca_of_uses(n, early, true);
  }
#endif

  // Loads must additionally stay above any anti-dependent stores;
  // possibly raise the computed LCA to respect them.
  if (n->is_Load() && LCA != early) {
    LCA = get_late_ctrl_with_anti_dep(n->as_Load(), early, LCA);
  }

  assert(LCA == find_non_split_ctrl(LCA), "unexpected late control");
  return LCA;
}
6430
// if this is a load, check for anti-dependent stores
// We use a conservative algorithm to identify potential interfering
// instructions and for rescheduling the load. The users of the memory
// input of this load are examined. Any use which is not a load and is
// dominated by early is considered a potentially interfering store.
// This can produce false positives.
Node* PhaseIdealLoop::get_late_ctrl_with_anti_dep(LoadNode* n, Node* early, Node* LCA) {
  int load_alias_idx = C->get_alias_index(n->adr_type());
  // Non-rewritable memory (e.g. constants) can never see interfering stores.
  if (C->alias_type(load_alias_idx)->is_rewritable()) {
    Unique_Node_List worklist;

    // Seed the worklist with all users of the load's memory state.
    Node* mem = n->in(MemNode::Memory);
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      Node* s = mem->fast_out(i);
      worklist.push(s);
    }
    // Process the worklist; it can grow as MergeMems and multi-CFG nodes
    // are expanded below. Stop once LCA has already risen to 'early'.
    for (uint i = 0; i < worklist.size() && LCA != early; i++) {
      Node* s = worklist.at(i);
      if (s->is_Load() || s->Opcode() == Op_SafePoint ||
          (s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0) ||
          s->is_Phi()) {
        // These memory users cannot act as interfering stores here;
        // Phis are handled separately below.
        continue;
      } else if (s->is_MergeMem()) {
        // A MergeMem just forwards memory; examine its users instead.
        for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) {
          Node* s1 = s->fast_out(i);
          worklist.push(s1);
        }
      } else {
        Node* sctrl = has_ctrl(s) ? get_ctrl(s) : s->in(0);
        assert(sctrl != nullptr || !s->is_reachable_from_root(), "must have control");
        if (sctrl != nullptr && !sctrl->is_top() && is_dominator(early, sctrl)) {
          const TypePtr* adr_type = s->adr_type();
          if (s->is_ArrayCopy()) {
            // Copy to known instance needs destination type to test for aliasing
            const TypePtr* dest_type = s->as_ArrayCopy()->_dest_type;
            if (dest_type != TypeOopPtr::BOTTOM) {
              adr_type = dest_type;
            }
          }
          if (C->can_alias(adr_type, load_alias_idx)) {
            // Potential anti-dependence: keep the load above this store.
            LCA = dom_lca_for_get_late_ctrl(LCA, sctrl, n);
          } else if (s->is_CFG() && s->is_Multi()) {
            // Look for the memory use of s (that is the use of its memory projection)
            for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) {
              Node* s1 = s->fast_out(i);
              assert(s1->is_Proj(), "projection expected");
              if (_igvn.type(s1) == Type::MEMORY) {
                for (DUIterator_Fast jmax, j = s1->fast_outs(jmax); j < jmax; j++) {
                  Node* s2 = s1->fast_out(j);
                  worklist.push(s2);
                }
              }
            }
          }
        }
      }
    }
    // For Phis only consider Region's inputs that were reached by following the memory edges
    if (LCA != early) {
      for (uint i = 0; i < worklist.size(); i++) {
        Node* s = worklist.at(i);
        if (s->is_Phi() && C->can_alias(s->adr_type(), load_alias_idx)) {
          Node* r = s->in(0);
          for (uint j = 1; j < s->req(); j++) {
            Node* in = s->in(j);
            Node* r_in = r->in(j);
            // We can't reach any node from a Phi because we don't enqueue Phi's uses above
            if (((worklist.member(in) && !in->is_Phi()) || in == mem) && is_dominator(early, r_in)) {
              LCA = dom_lca_for_get_late_ctrl(LCA, r_in, n);
            }
          }
        }
      }
    }
  }
  return LCA;
}
6508
6509 // Is CFG node 'dominator' dominating node 'n'?
6510 bool PhaseIdealLoop::is_dominator(Node* dominator, Node* n) {
6511 if (dominator == n) {
6512 return true;
6513 }
6514 assert(dominator->is_CFG() && n->is_CFG(), "must have CFG nodes");
6515 uint dd = dom_depth(dominator);
6516 while (dom_depth(n) >= dd) {
6517 if (n == dominator) {
6518 return true;
6519 }
6520 n = idom(n);
6521 }
6522 return false;
6523 }
6524
6525 // Is CFG node 'dominator' strictly dominating node 'n'?
6526 bool PhaseIdealLoop::is_strict_dominator(Node* dominator, Node* n) {
6527 return dominator != n && is_dominator(dominator, n);
6528 }
6529
//------------------------------dom_lca_for_get_late_ctrl_internal-------------
// Pair-wise LCA with tags.
// Tag each index with the node 'tag' currently being processed
// before advancing up the dominator chain using idom().
// Later calls that find a match to 'tag' know that this path has already
// been considered in the current LCA (which is input 'n1' by convention).
// Since get_late_ctrl() is only called once for each node, the tag array
// does not need to be cleared between calls to get_late_ctrl().
// Algorithm trades a larger constant factor for better asymptotic behavior
//
Node *PhaseIdealLoop::dom_lca_for_get_late_ctrl_internal(Node *n1, Node *n2, Node *tag_node) {
  uint d1 = dom_depth(n1);
  uint d2 = dom_depth(n2);
  // Combine the node index with the round counter so tags from a previous
  // round can never be mistaken for the current one.
  jlong tag = tag_node->_idx | (((jlong)_dom_lca_tags_round) << 32);

  do {
    if (d1 > d2) {
      // current lca is deeper than n2
      _dom_lca_tags.at_put_grow(n1->_idx, tag);
      n1 = idom(n1);
      d1 = dom_depth(n1);
    } else if (d1 < d2) {
      // n2 is deeper than current lca
      // If n2's path was already tagged in this call chain, the current LCA
      // (n1) already covers it.
      jlong memo = _dom_lca_tags.at_grow(n2->_idx, 0);
      if (memo == tag) {
        return n1;    // Return the current LCA
      }
      _dom_lca_tags.at_put_grow(n2->_idx, tag);
      n2 = idom(n2);
      d2 = dom_depth(n2);
    } else {
      // Here d1 == d2.  Due to edits of the dominator-tree, sections
      // of the tree might have the same depth.  These sections have
      // to be searched more carefully.

      // Scan up all the n1's with equal depth, looking for n2.
      _dom_lca_tags.at_put_grow(n1->_idx, tag);
      Node *t1 = idom(n1);
      while (dom_depth(t1) == d1) {
        if (t1 == n2)  return n2;
        _dom_lca_tags.at_put_grow(t1->_idx, tag);
        t1 = idom(t1);
      }
      // Scan up all the n2's with equal depth, looking for n1.
      _dom_lca_tags.at_put_grow(n2->_idx, tag);
      Node *t2 = idom(n2);
      while (dom_depth(t2) == d2) {
        if (t2 == n1)  return n1;
        _dom_lca_tags.at_put_grow(t2->_idx, tag);
        t2 = idom(t2);
      }
      // Move up to a new dominator-depth value as well as up the dom-tree.
      n1 = t1;
      n2 = t2;
      d1 = dom_depth(n1);
      d2 = dom_depth(n2);
    }
  } while (n1 != n2);
  return n1;
}
6590
//------------------------------init_dom_lca_tags------------------------------
// Tag could be a node's integer index, 32bits instead of 64bits in some cases
// Intended use does not involve any growth for the array, so it could
// be of fixed size.
void PhaseIdealLoop::init_dom_lca_tags() {
  uint limit = C->unique() + 1;
  // Pre-size the tag array to cover all current node indices.
  _dom_lca_tags.at_grow(limit, 0);
  _dom_lca_tags_round = 0;
#ifdef ASSERT
  // Freshly grown entries must be zero so they cannot collide with any tag.
  for (uint i = 0; i < limit; ++i) {
    assert(_dom_lca_tags.at(i) == 0, "Must be distinct from each node pointer");
  }
#endif // ASSERT
}
6605
//------------------------------build_loop_late--------------------------------
// Put Data nodes into some loop nest, by setting the _loop_or_ctrl[]->loop mapping.
// Second pass finds latest legal placement, and ideal loop placement.
// Implemented as an iterative post-order DFS over the def-use edges with an
// explicit stack (nstack) to avoid native stack overflow on deep graphs.
void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) {
  while (worklist.size() != 0) {
    Node *n = worklist.pop();
    // Only visit once
    if (visited.test_set(n->_idx)) continue;
    uint cnt = n->outcnt();
    uint   i = 0;
    while (true) {
      assert(_loop_or_ctrl[n->_idx], "no dead nodes");
      // Visit all children
      if (i < cnt) {
        Node* use = n->raw_out(i);
        ++i;
        // Check for dead uses.  Aggressively prune such junk.  It might be
        // dead in the global sense, but still have local uses so I cannot
        // easily call 'remove_dead_node'.
        if (_loop_or_ctrl[use->_idx] != nullptr || use->is_top()) { // Not dead?
          // Due to cycles, we might not hit the same fixed point in the verify
          // pass as we do in the regular pass.  Instead, visit such phis as
          // simple uses of the loop head.
          if( use->in(0) && (use->is_CFG() || use->is_Phi()) ) {
            if( !visited.test(use->_idx) )
              worklist.push(use);
          } else if( !visited.test_set(use->_idx) ) {
            nstack.push(n, i); // Save parent and next use's index.
            n   = use;         // Process all children of current use.
            cnt = use->outcnt();
            i   = 0;
          }
        } else {
          // Do not visit around the backedge of loops via data edges.
          // push dead code onto a worklist
          _deadlist.push(use);
        }
      } else {
        // All of n's children have been processed, complete post-processing.
        build_loop_late_post(n);
        if (C->failing()) { return; }
        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist.
          break;
        }
        // Get saved parent node and next use's index. Visit the rest of uses.
        n   = nstack.node();
        cnt = n->outcnt();
        i   = nstack.index();
        nstack.pop();
      }
    }
  }
}
6661
6662 // Verify that no data node is scheduled in the outer loop of a strip
6663 // mined loop.
6664 void PhaseIdealLoop::verify_strip_mined_scheduling(Node *n, Node* least) {
6665 #ifdef ASSERT
6666 if (get_loop(least)->_nest == 0) {
6667 return;
6668 }
6669 IdealLoopTree* loop = get_loop(least);
6670 Node* head = loop->_head;
6671 if (head->is_OuterStripMinedLoop() &&
6672 // Verification can't be applied to fully built strip mined loops
6673 head->as_Loop()->outer_loop_end()->in(1)->find_int_con(-1) == 0) {
6674 Node* sfpt = head->as_Loop()->outer_safepoint();
6675 ResourceMark rm;
6676 Unique_Node_List wq;
6677 wq.push(sfpt);
6678 for (uint i = 0; i < wq.size(); i++) {
6679 Node *m = wq.at(i);
6680 for (uint i = 1; i < m->req(); i++) {
6681 Node* nn = m->in(i);
6682 if (nn == n) {
6683 return;
6684 }
6685 if (nn != nullptr && has_ctrl(nn) && get_loop(get_ctrl(nn)) == loop) {
6686 wq.push(nn);
6687 }
6688 }
6689 }
6690 ShouldNotReachHere();
6691 }
6692 #endif
6693 }
6694
6695
//------------------------------build_loop_late_post---------------------------
// Put Data nodes into some loop nest, by setting the _loop_or_ctrl[]->loop mapping.
// Second pass finds latest legal placement, and ideal loop placement.
// Thin wrapper: delegates to build_loop_late_post_work with pinned=true.
void PhaseIdealLoop::build_loop_late_post(Node *n) {
  build_loop_late_post_work(n, true);
}
6702
// Class to visit all predicates in a predicate chain to find out which are dominated by a given node. Keeps track of
// the entry to the earliest predicate that is still dominated by the given dominator. This class is used when trying to
// legally skip all predicates when figuring out the latest placement such that a node does not interfere with Loop
// Predication or creating a Loop Limit Check Predicate later.
class DominatedPredicates : public UnifiedPredicateVisitor {
  Node* const _dominator;                       // Node whose dominance bounds the walk
  Node* _earliest_dominated_predicate_entry;    // Best (earliest) entry found so far
  bool _should_continue;                        // Cleared once a non-dominated predicate is hit
  PhaseIdealLoop* const _phase;

 public:
  DominatedPredicates(Node* dominator, Node* start_node, PhaseIdealLoop* phase)
      : _dominator(dominator),
        _earliest_dominated_predicate_entry(start_node),
        _should_continue(true),
        _phase(phase) {}
  NONCOPYABLE(DominatedPredicates);

  bool should_continue() const override {
    return _should_continue;
  }

  // Returns the entry to the earliest predicate that is still dominated by the given dominator (all could be dominated).
  Node* earliest_dominated_predicate_entry() const {
    return _earliest_dominated_predicate_entry;
  }

  void visit_predicate(const Predicate& predicate) override {
    Node* entry = predicate.entry();
    if (_phase->is_strict_dominator(entry, _dominator)) {
      // This predicate's entry strictly dominates '_dominator': stop here,
      // predicates above this point must not be skipped.
      _should_continue = false;
    } else {
      _earliest_dominated_predicate_entry = entry;
    }
  }
};
6739
// Find the latest legal placement for node 'n' between its earliest legal
// control (get_ctrl) and the LCA of its uses, choosing the position with the
// shallowest loop nesting. Also records 'n' in the chosen loop's body and
// handles dead nodes, pinned nodes and various special cases.
void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) {

  if (n->req() == 2 && (n->Opcode() == Op_ConvI2L || n->Opcode() == Op_CastII) && !C->major_progress() && !_verify_only) {
    _igvn._worklist.push(n);  // Maybe we'll normalize it, if no more loops.
  }

#ifdef ASSERT
  if (_verify_only && !n->is_CFG()) {
    // Check def-use domination.
    // We would like to expose this check in product but it appears to be expensive.
    compute_lca_of_uses(n, get_ctrl(n), true /* verify */);
  }
#endif

  // CFG and pinned nodes already handled
  if( n->in(0) ) {
    if( n->in(0)->is_top() ) return; // Dead?

    // We'd like +VerifyLoopOptimizations to not believe that Mod's/Loads
    // _must_ be pinned (they have to observe their control edge of course).
    // Unlike Stores (which modify an unallocable resource, the memory
    // state), Mods/Loads can float around.  So free them up.
    switch( n->Opcode() ) {
    case Op_DivI:
    case Op_DivF:
    case Op_DivD:
    case Op_ModI:
    case Op_LoadB:              // Same with Loads; they can sink
    case Op_LoadUB:             // during loop optimizations.
    case Op_LoadUS:
    case Op_LoadD:
    case Op_LoadF:
    case Op_LoadI:
    case Op_LoadKlass:
    case Op_LoadNKlass:
    case Op_LoadL:
    case Op_LoadS:
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadRange:
    case Op_LoadD_unaligned:
    case Op_LoadL_unaligned:
    case Op_StrComp:            // Does a bunch of load-like effects
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_AryEq:
    case Op_VectorizedHashCode:
    case Op_CountPositives:
      pinned = false;
    }
    if (n->is_CMove() || n->is_ConstraintCast()) {
      pinned = false;
    }
    if( pinned ) {
      // Pinned nodes stay at their control; just record them in the loop body.
      IdealLoopTree *chosen_loop = get_loop(n->is_CFG() ? n : get_ctrl(n));
      if( !chosen_loop->_child )       // Inner loop?
        chosen_loop->_body.push(n); // Collect inner loops
      return;
    }
  } else {                      // No slot zero
    if( n->is_CFG() ) {         // CFG with no slot 0 is dead
      _loop_or_ctrl.map(n->_idx,nullptr); // No block setting, it's globally dead
      return;
    }
    assert(!n->is_CFG() || n->outcnt() == 0, "");
  }

  // Do I have a "safe range" I can select over?
  Node *early = get_ctrl(n);// Early location already computed

  // Compute latest point this Node can go
  Node *LCA = get_late_ctrl( n, early );
  // LCA is null due to uses being dead
  if( LCA == nullptr ) {
#ifdef ASSERT
    for (DUIterator i1 = n->outs(); n->has_out(i1); i1++) {
      assert(_loop_or_ctrl[n->out(i1)->_idx] == nullptr, "all uses must also be dead");
    }
#endif
    _loop_or_ctrl.map(n->_idx, nullptr); // This node is useless
    _deadlist.push(n);
    return;
  }
  assert(LCA != nullptr && !LCA->is_top(), "no dead nodes");

  Node *legal = LCA;            // Walk 'legal' up the IDOM chain
  Node *least = legal;          // Best legal position so far
  // Walk from the latest legal position up to 'early', remembering the
  // position with the shallowest loop nesting.
  while( early != legal ) {     // While not at earliest legal
    if (legal->is_Start() && !early->is_Root()) {
#ifdef ASSERT
      // Bad graph. Print idom path and fail.
      dump_bad_graph("Bad graph detected in build_loop_late", n, early, LCA);
      assert(false, "Bad graph detected in build_loop_late");
#endif
      C->record_method_not_compilable("Bad graph detected in build_loop_late");
      return;
    }
    // Find least loop nesting depth
    legal = idom(legal);        // Bump up the IDOM tree
    // Check for lower nesting depth
    if( get_loop(legal)->_nest < get_loop(least)->_nest )
      least = legal;
  }
  assert(early == legal || legal != C->root(), "bad dominance of inputs");

  if (least != early) {
    // Move the node above predicates as far up as possible so a
    // following pass of Loop Predication doesn't hoist a predicate
    // that depends on it above that node.
    const PredicateIterator predicate_iterator(least);
    DominatedPredicates dominated_predicates(early, least, this);
    predicate_iterator.for_each(dominated_predicates);
    least = dominated_predicates.earliest_dominated_predicate_entry();
  }
  // Try not to place code on a loop entry projection
  // which can inhibit range check elimination.
  if (least != early && !BarrierSet::barrier_set()->barrier_set_c2()->is_gc_specific_loop_opts_pass(_mode)) {
    Node* ctrl_out = least->unique_ctrl_out_or_null();
    if (ctrl_out != nullptr && ctrl_out->is_Loop() &&
        least == ctrl_out->in(LoopNode::EntryControl) &&
        (ctrl_out->is_CountedLoop() || ctrl_out->is_OuterStripMinedLoop())) {
      Node* least_dom = idom(least);
      if (get_loop(least_dom)->is_member(get_loop(least))) {
        least = least_dom;
      }
    }
  }
  // Don't extend live ranges of raw oops
  if (least != early && n->is_ConstraintCast() && n->in(1)->bottom_type()->isa_rawptr() &&
      !n->bottom_type()->isa_rawptr()) {
    least = early;
  }

#ifdef ASSERT
  // Broken part of VerifyLoopOptimizations (F)
  // Reason:
  //   _verify_me->get_ctrl_no_update(n) seems to return wrong result
  /*
  // If verifying, verify that 'verify_me' has a legal location
  // and choose it as our location.
  if( _verify_me ) {
    Node *v_ctrl = _verify_me->get_ctrl_no_update(n);
    Node *legal = LCA;
    while( early != legal ) {   // While not at earliest legal
      if( legal == v_ctrl ) break;  // Check for prior good location
      legal = idom(legal)      ;// Bump up the IDOM tree
    }
    // Check for prior good location
    if( legal == v_ctrl ) least = legal; // Keep prior if found
  }
  */
#endif

  // Assign discovered "here or above" point
  least = find_non_split_ctrl(least);
  verify_strip_mined_scheduling(n, least);
  set_ctrl(n, least);

  // Collect inner loop bodies
  IdealLoopTree *chosen_loop = get_loop(least);
  if( !chosen_loop->_child )   // Inner loop?
    chosen_loop->_body.push(n);// Collect inner loops

  if (!_verify_only && n->Opcode() == Op_OpaqueZeroTripGuard) {
    _zero_trip_guard_opaque_nodes.push(n);
  }

  if (!_verify_only && n->Opcode() == Op_OpaqueMultiversioning) {
    _multiversion_opaque_nodes.push(n);
  }
}
6912
6913 #ifdef ASSERT
6914 void PhaseIdealLoop::dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA) {
6915 tty->print_cr("%s", msg);
6916 tty->print("n: "); n->dump();
6917 tty->print("early(n): "); early->dump();
6918 if (n->in(0) != nullptr && !n->in(0)->is_top() &&
6919 n->in(0) != early && !n->in(0)->is_Root()) {
6920 tty->print("n->in(0): "); n->in(0)->dump();
6921 }
6922 for (uint i = 1; i < n->req(); i++) {
6923 Node* in1 = n->in(i);
6924 if (in1 != nullptr && in1 != n && !in1->is_top()) {
6925 tty->print("n->in(%d): ", i); in1->dump();
6926 Node* in1_early = get_ctrl(in1);
6927 tty->print("early(n->in(%d)): ", i); in1_early->dump();
6928 if (in1->in(0) != nullptr && !in1->in(0)->is_top() &&
6929 in1->in(0) != in1_early && !in1->in(0)->is_Root()) {
6930 tty->print("n->in(%d)->in(0): ", i); in1->in(0)->dump();
6931 }
6932 for (uint j = 1; j < in1->req(); j++) {
6933 Node* in2 = in1->in(j);
6934 if (in2 != nullptr && in2 != n && in2 != in1 && !in2->is_top()) {
6935 tty->print("n->in(%d)->in(%d): ", i, j); in2->dump();
6936 Node* in2_early = get_ctrl(in2);
6937 tty->print("early(n->in(%d)->in(%d)): ", i, j); in2_early->dump();
6938 if (in2->in(0) != nullptr && !in2->in(0)->is_top() &&
6939 in2->in(0) != in2_early && !in2->in(0)->is_Root()) {
6940 tty->print("n->in(%d)->in(%d)->in(0): ", i, j); in2->in(0)->dump();
6941 }
6942 }
6943 }
6944 }
6945 }
6946 tty->cr();
6947 tty->print("LCA(n): "); LCA->dump();
6948 for (uint i = 0; i < n->outcnt(); i++) {
6949 Node* u1 = n->raw_out(i);
6950 if (u1 == n)
6951 continue;
6952 tty->print("n->out(%d): ", i); u1->dump();
6953 if (u1->is_CFG()) {
6954 for (uint j = 0; j < u1->outcnt(); j++) {
6955 Node* u2 = u1->raw_out(j);
6956 if (u2 != u1 && u2 != n && u2->is_CFG()) {
6957 tty->print("n->out(%d)->out(%d): ", i, j); u2->dump();
6958 }
6959 }
6960 } else {
6961 Node* u1_later = get_ctrl(u1);
6962 tty->print("later(n->out(%d)): ", i); u1_later->dump();
6963 if (u1->in(0) != nullptr && !u1->in(0)->is_top() &&
6964 u1->in(0) != u1_later && !u1->in(0)->is_Root()) {
6965 tty->print("n->out(%d)->in(0): ", i); u1->in(0)->dump();
6966 }
6967 for (uint j = 0; j < u1->outcnt(); j++) {
6968 Node* u2 = u1->raw_out(j);
6969 if (u2 == n || u2 == u1)
6970 continue;
6971 tty->print("n->out(%d)->out(%d): ", i, j); u2->dump();
6972 if (!u2->is_CFG()) {
6973 Node* u2_later = get_ctrl(u2);
6974 tty->print("later(n->out(%d)->out(%d)): ", i, j); u2_later->dump();
6975 if (u2->in(0) != nullptr && !u2->in(0)->is_top() &&
6976 u2->in(0) != u2_later && !u2->in(0)->is_Root()) {
6977 tty->print("n->out(%d)->in(0): ", i); u2->in(0)->dump();
6978 }
6979 }
6980 }
6981 }
6982 }
6983 dump_idoms(early, LCA);
6984 tty->cr();
6985 }
6986
// Class to compute the real LCA given an early node and a wrong LCA in a bad graph.
class RealLCA {
  const PhaseIdealLoop* _phase;
  Node* _early;
  Node* _wrong_lca;
  uint _early_index;       // Index of the real LCA within early's idom chain
  int _wrong_lca_index;    // Index within wrong LCA's idom chain (-1 if not found)

  // Given idom chains of early and wrong LCA: Walk through idoms starting at StartNode and find the first node which
  // is different: Return the previously visited node which must be the real LCA.
  // The node lists also contain _early and _wrong_lca, respectively.
  Node* find_real_lca(Unique_Node_List& early_with_idoms, Unique_Node_List& wrong_lca_with_idoms) {
    // Walk both chains backwards (i.e. from StartNode downwards) in lockstep.
    int early_index = early_with_idoms.size() - 1;
    int wrong_lca_index = wrong_lca_with_idoms.size() - 1;
    bool found_difference = false;
    do {
      if (early_with_idoms[early_index] != wrong_lca_with_idoms[wrong_lca_index]) {
        // First time early and wrong LCA idoms differ. Real LCA must be at the previous index.
        found_difference = true;
        break;
      }
      early_index--;
      wrong_lca_index--;
    } while (wrong_lca_index >= 0);

    assert(early_index >= 0, "must always find an LCA - cannot be early");
    _early_index = early_index;
    _wrong_lca_index = wrong_lca_index;
    Node* real_lca = early_with_idoms[_early_index + 1]; // Plus one to skip _early.
    assert(found_difference || real_lca == _wrong_lca, "wrong LCA dominates early and is therefore the real LCA");
    return real_lca;
  }

  // Print both idom chains and the computed real LCA.
  void dump(Node* real_lca) {
    tty->cr();
    tty->print_cr("idoms of early \"%d %s\":", _early->_idx, _early->Name());
    _phase->dump_idom(_early, _early_index + 1);

    tty->cr();
    tty->print_cr("idoms of (wrong) LCA \"%d %s\":", _wrong_lca->_idx, _wrong_lca->Name());
    _phase->dump_idom(_wrong_lca, _wrong_lca_index + 1);

    tty->cr();
    tty->print("Real LCA of early \"%d %s\" (idom[%d]) and wrong LCA \"%d %s\"",
               _early->_idx, _early->Name(), _early_index, _wrong_lca->_idx, _wrong_lca->Name());
    if (_wrong_lca_index >= 0) {
      tty->print(" (idom[%d])", _wrong_lca_index);
    }
    tty->print_cr(":");
    real_lca->dump();
  }

 public:
  RealLCA(const PhaseIdealLoop* phase, Node* early, Node* wrong_lca)
      : _phase(phase), _early(early), _wrong_lca(wrong_lca), _early_index(0), _wrong_lca_index(0) {
    assert(!wrong_lca->is_Start(), "StartNode is always a common dominator");
  }

  // Collect both idom chains (capped at 10000 entries), compute the real LCA
  // and print a diagnostic report.
  void compute_and_dump() {
    ResourceMark rm;
    Unique_Node_List early_with_idoms;
    Unique_Node_List wrong_lca_with_idoms;
    early_with_idoms.push(_early);
    wrong_lca_with_idoms.push(_wrong_lca);
    _phase->get_idoms(_early, 10000, early_with_idoms);
    _phase->get_idoms(_wrong_lca, 10000, wrong_lca_with_idoms);
    Node* real_lca = find_real_lca(early_with_idoms, wrong_lca_with_idoms);
    dump(real_lca);
  }
};
7057
// Dump the idom chain of early, of the wrong LCA and dump the real LCA of early and wrong LCA.
// Debug aid invoked when a dominance violation is detected.
void PhaseIdealLoop::dump_idoms(Node* early, Node* wrong_lca) {
  assert(!is_dominator(early, wrong_lca), "sanity check that early does not dominate wrong lca");
  assert(!has_ctrl(early) && !has_ctrl(wrong_lca), "sanity check, no data nodes");

  RealLCA real_lca(this, early, wrong_lca);
  real_lca.compute_and_dump();
}
7066 #endif // ASSERT
7067
#ifndef PRODUCT
//------------------------------dump-------------------------------------------
// Dump the whole loop tree: compute a reverse post-order of the CFG, then
// print the loop nest starting from the root loop.
void PhaseIdealLoop::dump() const {
  ResourceMark rm;
  Node_Stack stack(C->live_nodes() >> 2);
  Node_List rpo_list;
  VectorSet visited;
  visited.set(C->top()->_idx);
  rpo(C->root(), stack, visited, rpo_list);
  // Dump root loop indexed by last element in PO order
  dump(_ltree_root, rpo_list.size(), rpo_list);
}
7080
// Recursively dump one loop of the loop tree: its head, the CFG nodes that
// belong to it (in RPO), nested loops, and the data nodes controlled by
// each CFG node. Also cross-checks cached idoms/ctrls against recomputed ones.
void PhaseIdealLoop::dump(IdealLoopTree* loop, uint idx, Node_List &rpo_list) const {
  loop->dump_head();

  // Now scan for CFG nodes in the same loop
  for (uint j = idx; j > 0; j--) {
    Node* n = rpo_list[j-1];
    if (!_loop_or_ctrl[n->_idx])      // Skip dead nodes
      continue;

    if (get_loop(n) != loop) { // Wrong loop nest
      if (get_loop(n)->_head == n &&    // Found nested loop?
          get_loop(n)->_parent == loop)
        dump(get_loop(n), rpo_list.size(), rpo_list);     // Print it nested-ly
      continue;
    }

    // Dump controlling node
    tty->sp(2 * loop->_nest);
    tty->print("C");
    if (n == C->root()) {
      n->dump();
    } else {
      Node* cached_idom   = idom_no_update(n);
      Node* computed_idom = n->in(0);
      if (n->is_Region()) {
        computed_idom = compute_idom(n);
        // computed_idom() will return n->in(0) when idom(n) is an IfNode (or
        // any MultiBranch ctrl node), so apply a similar transform to
        // the cached idom returned from idom_no_update.
        cached_idom = find_non_split_ctrl(cached_idom);
      }
      tty->print(" ID:%d", computed_idom->_idx);
      n->dump();
      if (cached_idom != computed_idom) {
        tty->print_cr("*** BROKEN IDOM!  Computed as: %d, cached as: %d",
                      computed_idom->_idx, cached_idom->_idx);
      }
    }
    // Dump nodes it controls
    for (uint k = 0; k < _loop_or_ctrl.max(); k++) {
      // (k < C->unique() && get_ctrl(find(k)) == n)
      // The low bit of a _loop_or_ctrl entry tags it as a ctrl (vs loop)
      // pointer, hence the (Node*)((intptr_t)n + 1) comparison.
      if (k < C->unique() && _loop_or_ctrl[k] == (Node*)((intptr_t)n + 1)) {
        Node* m = C->root()->find(k);
        if (m && m->outcnt() > 0) {
          if (!(has_ctrl(m) && get_ctrl_no_update(m) == n)) {
            tty->print_cr("*** BROKEN CTRL ACCESSOR!  _loop_or_ctrl[k] is %p, ctrl is %p",
                          _loop_or_ctrl[k], has_ctrl(m) ? get_ctrl_no_update(m) : nullptr);
          }
          tty->sp(2 * loop->_nest + 1);
          m->dump();
        }
      }
    }
  }
}
7136
7137 void PhaseIdealLoop::dump_idom(Node* n, const uint count) const {
7138 if (has_ctrl(n)) {
7139 tty->print_cr("No idom for data nodes");
7140 } else {
7141 ResourceMark rm;
7142 Unique_Node_List idoms;
7143 get_idoms(n, count, idoms);
7144 dump_idoms_in_reverse(n, idoms);
7145 }
7146 }
7147
7148 void PhaseIdealLoop::get_idoms(Node* n, const uint count, Unique_Node_List& idoms) const {
7149 Node* next = n;
7150 for (uint i = 0; !next->is_Start() && i < count; i++) {
7151 next = idom(next);
7152 assert(!idoms.member(next), "duplicated idom is not possible");
7153 idoms.push(next);
7154 }
7155 }
7156
// Print the collected idom chain from the StartNode end down to 'n',
// right-aligning the idom indices and node indices in columns.
void PhaseIdealLoop::dump_idoms_in_reverse(const Node* n, const Node_List& idom_list) const {
  Node* next;
  uint padding = 3;
  // Width of the widest node index, derived from the node count.
  uint node_index_padding_width = (C->unique() == 0 ? 0 : static_cast<int>(log10(static_cast<double>(C->unique())))) + 1;
  for (int i = idom_list.size() - 1; i >= 0; i--) {
    // Bump padding when the idom[] index gains a digit (10, 100, ...),
    // so all columns stay aligned.
    if (i == 9 || i == 99) {
      padding++;
    }
    next = idom_list[i];
    tty->print_cr("idom[%d]:%*c%*d %s", i, padding, ' ', node_index_padding_width, next->_idx, next->Name());
  }
  tty->print_cr("n: %*c%*d %s", padding, ' ', node_index_padding_width, n->_idx, n->Name());
}
7170 #endif // NOT PRODUCT
7171
7172 // Collect a R-P-O for the whole CFG.
7173 // Result list is in post-order (scan backwards for RPO)
7174 void PhaseIdealLoop::rpo(Node* start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list) const {
7175 stk.push(start, 0);
7176 visited.set(start->_idx);
7177
7178 while (stk.is_nonempty()) {
7179 Node* m = stk.node();
7180 uint idx = stk.index();
7181 if (idx < m->outcnt()) {
7182 stk.set_index(idx + 1);
7183 Node* n = m->raw_out(idx);
7184 if (n->is_CFG() && !visited.test_set(n->_idx)) {
7185 stk.push(n, 0);
7186 }
7187 } else {
7188 rpo_list.push(m);
7189 stk.pop();
7190 }
7191 }
7192 }
7193
7194 ConINode* PhaseIdealLoop::intcon(jint i) {
7195 ConINode* node = _igvn.intcon(i);
7196 set_root_as_ctrl(node);
7197 return node;
7198 }
7199
7200 ConLNode* PhaseIdealLoop::longcon(jlong i) {
7201 ConLNode* node = _igvn.longcon(i);
7202 set_root_as_ctrl(node);
7203 return node;
7204 }
7205
7206 ConNode* PhaseIdealLoop::makecon(const Type* t) {
7207 ConNode* node = _igvn.makecon(t);
7208 set_root_as_ctrl(node);
7209 return node;
7210 }
7211
7212 ConNode* PhaseIdealLoop::integercon(jlong l, BasicType bt) {
7213 ConNode* node = _igvn.integercon(l, bt);
7214 set_root_as_ctrl(node);
7215 return node;
7216 }
7217
7218 ConNode* PhaseIdealLoop::zerocon(BasicType bt) {
7219 ConNode* node = _igvn.zerocon(bt);
7220 set_root_as_ctrl(node);
7221 return node;
7222 }
7223
7224
7225 //=============================================================================
7226 //------------------------------LoopTreeIterator-------------------------------
7227
7228 // Advance to next loop tree using a preorder, left-to-right traversal.
7229 void LoopTreeIterator::next() {
7230 assert(!done(), "must not be done.");
7231 if (_curnt->_child != nullptr) {
7232 _curnt = _curnt->_child;
7233 } else if (_curnt->_next != nullptr) {
7234 _curnt = _curnt->_next;
7235 } else {
7236 while (_curnt != _root && _curnt->_next == nullptr) {
7237 _curnt = _curnt->_parent;
7238 }
7239 if (_curnt == _root) {
7240 _curnt = nullptr;
7241 assert(done(), "must be done.");
7242 } else {
7243 assert(_curnt->_next != nullptr, "must be more to do");
7244 _curnt = _curnt->_next;
7245 }
7246 }
7247 }