/*
 * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/loopnode.hpp"
#include "opto/movenode.hpp"
#include "opto/node.hpp"
#include "opto/opaquenode.hpp"
#include "opto/predicates.hpp"

//------------------------------split_thru_region------------------------------
// Split Node 'n' through merge point.
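// The result is one clone of 'n' per predecessor of 'region', merged by a new
// RegionNode. Sketch for a two-predecessor region (do_split_if() below applies
// this to the If node and then to each of its projections):
//
//   in1   in2               in1     in2
//     \   /                  |       |
//    region       =>        n1      n2      n_i = clone of n, rewired to the
//      |                      \     /       i-th input of any region-local Phi
//      n                     r(Region)      (and of the control path) it used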
RegionNode* PhaseIdealLoop::split_thru_region(Node* n, RegionNode* region) {
  assert(n->is_CFG(), "");
  RegionNode* r = new RegionNode(region->req());
  IdealLoopTree* loop = get_loop(n);
  for (uint i = 1; i < region->req(); i++) {
    Node* x = n->clone();
    Node* in0 = n->in(0);
    if (in0->in(0) == region) x->set_req(0, in0->in(i));
    for (uint j = 1; j < n->req(); j++) {
      Node* in = n->in(j);
      if (get_ctrl(in) == region) {
        x->set_req(j, in->in(i));
      }
    }
    _igvn.register_new_node_with_optimizer(x);
    set_loop(x, loop);
    set_idom(x, x->in(0), dom_depth(x->in(0))+1);
    r->init_req(i, x);
  }

  // Record region
  r->set_req(0,region);         // Not a TRUE RegionNode
  _igvn.register_new_node_with_optimizer(r);
  set_loop(r, loop);
  if (!loop->_child) {
    loop->_body.push(r);
  }
  return r;
}

//------------------------------split_up---------------------------------------
// Split block-local op up through the phis to empty the current block
bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) {
  if( n->is_CFG() ) {
    assert( n->in(0) != blk1, "Lousy candidate for split-if" );
    return false;
  }
  if (!at_relevant_ctrl(n, blk1, blk2))
    return false;               // Not block local
  if( n->is_Phi() ) return false; // Local PHIs are expected

  // Recursively split-up inputs
  for (uint i = 1; i < n->req(); i++) {
    if( split_up( n->in(i), blk1, blk2 ) ) {
      // Got split recursively and self went dead?
      if (n->outcnt() == 0)
        _igvn.remove_dead_node(n);
      return true;
    }
  }

  if (clone_cmp_loadklass_down(n, blk1, blk2)) {
    return true;
  }

  // Check for needing to clone-up a compare.  Can't do that, it forces
  // another (nested) split-if transform.  Instead, clone it "down".
  if (clone_cmp_down(n, blk1, blk2)) {
    return true;
  }

  clone_template_assertion_expression_down(n);

  if (n->Opcode() == Op_OpaqueZeroTripGuard) {
    // If this OpaqueZeroTripGuard is part of the zero trip guard for a loop:
    // 1- it can't be shared
    // 2- the zero trip guard can't be the if that's being split
    // As a consequence, this node could be assigned control anywhere between its current control and the zero trip guard.
    // Move it down to get it out of the way of the split-if transform and avoid breaking the zero trip guard shape.
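    // Shape asserted below (sketch):
    //   OpaqueZeroTripGuard --> CmpI --> Bool --> If
    // Each edge is the node's unique output, so the whole chain can be
    // re-pinned at the If's control as one unit.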
    Node* cmp = n->unique_out();
    assert(cmp->Opcode() == Op_CmpI, "bad zero trip guard shape");
    Node* bol = cmp->unique_out();
    assert(bol->Opcode() == Op_Bool, "bad zero trip guard shape");
    Node* iff = bol->unique_out();
    assert(iff->Opcode() == Op_If, "bad zero trip guard shape");
    set_ctrl(n, iff->in(0));
    set_ctrl(cmp, iff->in(0));
    set_ctrl(bol, iff->in(0));
    return true;
  }

  // See if splitting-up a Store.  Any anti-dep loads must go up as
  // well.  An anti-dep load might be in the wrong block, because in
  // this particular layout/schedule we ignore anti-deps and allow
  // memory to be alive twice.  This only works if we do the same
  // operations on anti-dep loads as we do on their killing stores.
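  // Example (sketch): a Load of the same alias class still consuming the
  // memory Phi that this Store kills may sit in the wrong block. Hoisting it
  // to blk1 makes it block-local, so a later iteration of the progress loop
  // in do_split_if() splits it up through the same Phis as the Store.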
  if( n->is_Store() && n->in(MemNode::Memory)->in(0) == n->in(0) ) {
    // Get store's memory slice
    int alias_idx = C->get_alias_index(_igvn.type(n->in(MemNode::Address))->is_ptr());

    // Get memory-phi anti-dep loads will be using
    Node *memphi = n->in(MemNode::Memory);
    assert( memphi->is_Phi(), "" );
    // Hoist any anti-dep load to the splitting block;
    // it will then "split-up".
    for (DUIterator_Fast imax,i = memphi->fast_outs(imax); i < imax; i++) {
      Node *load = memphi->fast_out(i);
      if( load->is_Load() && alias_idx == C->get_alias_index(_igvn.type(load->in(MemNode::Address))->is_ptr()) )
        set_ctrl(load,blk1);
    }
  }

  // Found some other Node; must clone it up
#ifndef PRODUCT
  if( PrintOpto && VerifyLoopOptimizations ) {
    tty->print("Cloning up: ");
    n->dump();
  }
#endif

  // ConvI2L may have type information on it which becomes invalid if
  // it moves up in the graph, so widen the type of any clones to
  // TypeLong::INT when pushing them up.
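  // (Why, roughly: the narrowed type of a ConvI2L is usually justified by a
  // dominating test; a clone hoisted above the merge point may also float
  // above that test, so only the unconstrained TypeLong::INT stays valid.)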
  const Type* rtype = nullptr;
  if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::INT) {
    rtype = TypeLong::INT;
  }

  // Now actually split-up this guy.  One copy per control path merging.
  Node *phi = PhiNode::make_blank(blk1, n);
  for( uint j = 1; j < blk1->req(); j++ ) {
    Node *x = n->clone();
    // Widen the type of the ConvI2L when pushing up.
    if (rtype != nullptr) x->as_Type()->set_type(rtype);
    if( n->in(0) && n->in(0) == blk1 )
      x->set_req( 0, blk1->in(j) );
    for( uint i = 1; i < n->req(); i++ ) {
      Node *m = n->in(i);
      if( get_ctrl(m) == blk1 ) {
        assert( m->in(0) == blk1, "" );
        x->set_req( i, m->in(j) );
      }
    }
    register_new_node( x, blk1->in(j) );
    phi->init_req( j, x );
  }
  // Announce phi to optimizer
  register_new_node(phi, blk1);

  // Remove cloned-up value from optimizer; use phi instead
  _igvn.replace_node( n, phi );

  // (There used to be a self-recursive call to split_up() here,
  // but it is not needed.  All necessary forward walking is done
  // by do_split_if() below.)

  return true;
}

// Look for a (If .. (Bool(CmpP (LoadKlass .. (AddP obj ..)) ..))) and clone all of it down.
// There's likely a CheckCastPP on one of the branches of the If, with obj as input.
// If the (LoadKlass .. (AddP obj ..)) is not cloned down, then split if transforms this to: (If .. (Bool(CmpP phi1 ..)))
// and the CheckCastPP to (CheckCastPP phi2). It's possible then that phi2 is transformed to a CheckCastPP
// (through PhiNode::Ideal) and that that CheckCastPP is replaced by another narrower CheckCastPP at the same control
// (through ConstraintCastNode::Identity). That could cause the CheckCastPP at the If to become top while (CmpP phi1)
// wouldn't constant fold because it's using a different data path. Cloning the whole subgraph down guarantees both the
// AddP and CheckCastPP have the same obj input after split if.
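// Subgraph being cloned down (compressed-klass variant, sketch):
//
//   obj --> AddP --> LoadNKlass --> DecodeNKlass --> CmpP --> Bool --> If
//
// with a CheckCastPP(obj) typically hanging off one of the If's projections.
// Cloning per Cmp input (see clone_loadklass_nodes_at_cmp_index() below)
// keeps the AddP and the CheckCastPP tied to the same 'obj' after split-if.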
bool PhaseIdealLoop::clone_cmp_loadklass_down(Node* n, const Node* blk1, const Node* blk2) {
  if (n->Opcode() == Op_AddP && at_relevant_ctrl(n, blk1, blk2)) {
    Node_List cmp_nodes;
    uint old = C->unique();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* u1 = n->fast_out(i);
      if (u1->Opcode() == Op_LoadNKlass && at_relevant_ctrl(u1, blk1, blk2)) {
        for (DUIterator_Fast jmax, j = u1->fast_outs(jmax); j < jmax; j++) {
          Node* u2 = u1->fast_out(j);
          if (u2->Opcode() == Op_DecodeNKlass && at_relevant_ctrl(u2, blk1, blk2)) {
            for (DUIterator k = u2->outs(); u2->has_out(k); k++) {
              Node* u3 = u2->out(k);
              if (at_relevant_ctrl(u3, blk1, blk2) && clone_cmp_down(u3, blk1, blk2)) {
                --k;
              }
            }
            for (DUIterator_Fast kmax, k = u2->fast_outs(kmax); k < kmax; k++) {
              Node* u3 = u2->fast_out(k);
              if (u3->_idx >= old) {
                cmp_nodes.push(u3);
              }
            }
          }
        }
      } else if (u1->Opcode() == Op_LoadKlass && at_relevant_ctrl(u1, blk1, blk2)) {
        for (DUIterator j = u1->outs(); u1->has_out(j); j++) {
          Node* u2 = u1->out(j);
          if (at_relevant_ctrl(u2, blk1, blk2) && clone_cmp_down(u2, blk1, blk2)) {
            --j;
          }
        }
        for (DUIterator_Fast kmax, k = u1->fast_outs(kmax); k < kmax; k++) {
          Node* u2 = u1->fast_out(k);
          if (u2->_idx >= old) {
            cmp_nodes.push(u2);
          }
        }
      }
    }

    for (uint i = 0; i < cmp_nodes.size(); ++i) {
      Node* cmp = cmp_nodes.at(i);
      clone_loadklass_nodes_at_cmp_index(n, cmp, 1);
      clone_loadklass_nodes_at_cmp_index(n, cmp, 2);
    }
    if (n->outcnt() == 0) {
      assert(n->is_dead(), "");
      return true;
    }
  }
  return false;
}

bool PhaseIdealLoop::at_relevant_ctrl(Node* n, const Node* blk1, const Node* blk2) {
  return ctrl_or_self(n) == blk1 || ctrl_or_self(n) == blk2;
}

void PhaseIdealLoop::clone_loadklass_nodes_at_cmp_index(const Node* n, Node* cmp, int i) {
  Node* decode = cmp->in(i);
  if (decode->Opcode() == Op_DecodeNKlass) {
    Node* loadklass = decode->in(1);
    if (loadklass->Opcode() == Op_LoadNKlass) {
      Node* addp = loadklass->in(MemNode::Address);
      if (addp == n) {
        Node* ctrl = get_ctrl(cmp);
        Node* decode_clone = decode->clone();
        Node* loadklass_clone = loadklass->clone();
        Node* addp_clone = addp->clone();
        register_new_node(decode_clone, ctrl);
        register_new_node(loadklass_clone, ctrl);
        register_new_node(addp_clone, ctrl);
        _igvn.replace_input_of(cmp, i, decode_clone);
        _igvn.replace_input_of(decode_clone, 1, loadklass_clone);
        _igvn.replace_input_of(loadklass_clone, MemNode::Address, addp_clone);
        if (decode->outcnt() == 0) {
          _igvn.remove_dead_node(decode);
        }
      }
    }
  } else {
    Node* loadklass = cmp->in(i);
    if (loadklass->Opcode() == Op_LoadKlass) {
      Node* addp = loadklass->in(MemNode::Address);
      if (addp == n) {
        Node* ctrl = get_ctrl(cmp);
        Node* loadklass_clone = loadklass->clone();
        Node* addp_clone = addp->clone();
        register_new_node(loadklass_clone, ctrl);
        register_new_node(addp_clone, ctrl);
        _igvn.replace_input_of(cmp, i, loadklass_clone);
        _igvn.replace_input_of(loadklass_clone, MemNode::Address, addp_clone);
        if (loadklass->outcnt() == 0) {
          _igvn.remove_dead_node(loadklass);
        }
      }
    }
  }
}

bool PhaseIdealLoop::clone_cmp_down(Node* n, const Node* blk1, const Node* blk2) {
  if( n->is_Cmp() ) {
    assert(get_ctrl(n) == blk2 || get_ctrl(n) == blk1, "must be in block with IF");
    // Check for a simple Cmp/Bool/CMove which we can clone-up.  The
    // Cmp/Bool/CMove sequence must have no other users and must reside
    // entirely in the split-if block.  Non-simple Cmp/Bool/CMove sequences
    // are 'cloned-down' below: private, per-use versions of the Cmp and Bool
    // are made.  These sink to the CMove block.  If the CMove is in the
    // split-if block, then in the next iteration this will become a simple
    // Cmp/Bool/CMove set to clone-up.
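    // Clone-up candidate shape tested below (sketch):
    //   Cmp -(unique out)-> Bool -(unique out)-> CMove
    // with all three at blk1/blk2 control.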
    Node *bol, *cmov;
    if (!(n->outcnt() == 1 && n->unique_out()->is_Bool() &&
          (bol = n->unique_out()->as_Bool()) &&
          (at_relevant_ctrl(bol, blk1, blk2) &&
           bol->outcnt() == 1 &&
           bol->unique_out()->is_CMove() &&
           (cmov = bol->unique_out()->as_CMove()) &&
           at_relevant_ctrl(cmov, blk1, blk2)))) {

      // Must clone down
#ifndef PRODUCT
      if( PrintOpto && VerifyLoopOptimizations ) {
        tty->print("Cloning down: ");
        n->dump();
      }
#endif
      if (!n->is_FastLock()) {
        // Clone down any block-local BoolNode uses of this CmpNode
        for (DUIterator i = n->outs(); n->has_out(i); i++) {
          Node* bol = n->out(i);
          assert( bol->is_Bool(), "" );
          if (bol->outcnt() == 1) {
            Node* use = bol->unique_out();
            if (use->is_OpaqueNotNull() || use->is_OpaqueTemplateAssertionPredicate() ||
                use->is_OpaqueInitializedAssertionPredicate()) {
              if (use->outcnt() == 1) {
                Node* iff = use->unique_out();
                assert(iff->is_If(), "unexpected node type");
                Node *use_c = iff->in(0);
                if (use_c == blk1 || use_c == blk2) {
                  continue;
                }
              }
            } else {
              // We might see an Opaque1 from a loop limit check here
              assert(use->is_If() || use->is_CMove() || use->Opcode() == Op_Opaque1 || use->is_AllocateArray(), "unexpected node type");
              Node *use_c = (use->is_If() || use->is_AllocateArray()) ? use->in(0) : get_ctrl(use);
              if (use_c == blk1 || use_c == blk2) {
                assert(use->is_CMove(), "unexpected node type");
                continue;
              }
            }
          }
          if (at_relevant_ctrl(bol, blk1, blk2)) {
            // Recursively sink any BoolNode
#ifndef PRODUCT
            if( PrintOpto && VerifyLoopOptimizations ) {
              tty->print("Cloning down: ");
              bol->dump();
            }
#endif
            for (DUIterator j = bol->outs(); bol->has_out(j); j++) {
              Node* u = bol->out(j);
              // Uses are either IfNodes, CMoves, OpaqueNotNull, or Opaque*AssertionPredicate
              if (u->is_OpaqueNotNull() || u->is_OpaqueTemplateAssertionPredicate() ||
                  u->is_OpaqueInitializedAssertionPredicate()) {
                assert(u->in(1) == bol, "bad input");
                for (DUIterator_Last kmin, k = u->last_outs(kmin); k >= kmin; --k) {
                  Node* iff = u->last_out(k);
                  assert(iff->is_If() || iff->is_CMove(), "unexpected node type");
                  assert( iff->in(1) == u, "" );
                  // Get control block of either the CMove or the If input
                  Node *iff_ctrl = iff->is_If() ? iff->in(0) : get_ctrl(iff);
                  Node *x1 = bol->clone();
                  Node *x2 = u->clone();
                  register_new_node(x1, iff_ctrl);
                  register_new_node(x2, iff_ctrl);
                  _igvn.replace_input_of(x2, 1, x1);
                  _igvn.replace_input_of(iff, 1, x2);
                }
                _igvn.remove_dead_node(u);
                --j;
              } else {
                // We might see an Opaque1 from a loop limit check here
                assert(u->is_If() || u->is_CMove() || u->Opcode() == Op_Opaque1 || u->is_AllocateArray(), "unexpected node type");
                assert(u->is_AllocateArray() || u->in(1) == bol, "");
                assert(!u->is_AllocateArray() || u->in(AllocateNode::ValidLengthTest) == bol, "wrong input to AllocateArray");
                // Get control block of either the CMove or the If input
                Node *u_ctrl = (u->is_If() || u->is_AllocateArray()) ? u->in(0) : get_ctrl(u);
                assert((u_ctrl != blk1 && u_ctrl != blk2) || u->is_CMove(), "won't converge");
                Node *x = bol->clone();
                register_new_node(x, u_ctrl);
                _igvn.replace_input_of(u, u->is_AllocateArray() ? AllocateNode::ValidLengthTest : 1, x);
                --j;
              }
            }
            _igvn.remove_dead_node(bol);
            --i;
          }
        }
      }
      // Clone down this CmpNode
      for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; --j) {
        Node* use = n->last_out(j);
        uint pos = 1;
        if (n->is_FastLock()) {
          pos = TypeFunc::Parms + 2;
          assert(use->is_Lock(), "FastLock only used by LockNode");
        }
        assert(use->in(pos) == n, "" );
        Node *x = n->clone();
        register_new_node(x, ctrl_or_self(use));
        _igvn.replace_input_of(use, pos, x);
      }
      _igvn.remove_dead_node(n);

      return true;
    }
  }
  return false;
}

// 'n' could be a node belonging to a Template Assertion Expression (i.e. any node between a Template Assertion Predicate
// and its OpaqueLoop* nodes (included)). We cannot simply split this node up since this would create a phi node inside
// the Template Assertion Expression - making it unrecognizable as such. Therefore, we completely clone the entire
// Template Assertion Expression "down". This ensures that we have an untouched copy that is still recognized by the
// Template Assertion Predicate matching code.
void PhaseIdealLoop::clone_template_assertion_expression_down(Node* node) {
  if (!TemplateAssertionExpressionNode::is_in_expression(node)) {
    return;
  }

  TemplateAssertionExpressionNode template_assertion_expression_node(node);
  auto clone_expression = [&](IfNode* template_assertion_predicate) {
    OpaqueTemplateAssertionPredicateNode* opaque_node =
        template_assertion_predicate->in(1)->as_OpaqueTemplateAssertionPredicate();
    TemplateAssertionExpression template_assertion_expression(opaque_node);
    Node* new_ctrl = template_assertion_predicate->in(0);
    OpaqueTemplateAssertionPredicateNode* cloned_opaque_node = template_assertion_expression.clone(new_ctrl, this);
    igvn().replace_input_of(template_assertion_predicate, 1, cloned_opaque_node);
  };
  template_assertion_expression_node.for_each_template_assertion_predicate(clone_expression);
}

//------------------------------register_new_node------------------------------
void PhaseIdealLoop::register_new_node( Node *n, Node *blk ) {
  assert(!n->is_CFG(), "must be data node");
  _igvn.register_new_node_with_optimizer(n);
  set_ctrl(n, blk);
  IdealLoopTree *loop = get_loop(blk);
  if( !loop->_child )
    loop->_body.push(n);
}

//------------------------------small_cache------------------------------------
struct small_cache : public Dict {
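  // Maps a block to the def value (spun-up Phi or clone) already computed
  // for it by spinup() below. Despite the name, lru_insert() is a plain
  // insert with no eviction.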

  small_cache() : Dict( cmpkey, hashptr ) {}
  Node *probe( Node *use_blk ) { return (Node*)((*this)[use_blk]); }
  void lru_insert( Node *use_blk, Node *new_def ) { Insert(use_blk,new_def); }
};

//------------------------------spinup-----------------------------------------
// "Spin up" the dominator tree, starting at the use site and stopping when we
// find the post-dominating point.

// We must be at the merge point which post-dominates 'new_false' and
// 'new_true'.  Figure out which edges into the RegionNode eventually lead up
// to false and which to true.  Put in a PhiNode to merge values; plug in
// the appropriate false-arm or true-arm values.  If some path leads to the
// original IF, then insert a Phi recursively.
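// Worked example (sketch): for a use block U, walk idom(U), idom(idom(U)), ...
// until reaching iff_dom; 'prior_n' is then the last block visited before it.
// If prior_n is new_false or new_true, clone the def there. Otherwise prior_n
// merges both arms: build a Phi there and recurse on each of its inputs. The
// second walk below re-traverses the same path purely to fill the cache, a
// form of path compression.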
Node *PhaseIdealLoop::spinup( Node *iff_dom, Node *new_false, Node *new_true, Node *use_blk, Node *def, small_cache *cache ) {
  if (use_blk->is_top())        // Handle dead uses
    return use_blk;
  Node *prior_n = (Node*)((intptr_t)0xdeadbeef);
  Node *n = use_blk;            // Get path input
  assert( use_blk != iff_dom, "" );
  // Here's the "spin up the dominator tree" loop.  Do a cache-check
  // along the way, in case we've come this way before.
  while( n != iff_dom ) {       // Found post-dominating point?
    prior_n = n;
    n = idom(n);                // Search higher
    Node *s = cache->probe( prior_n ); // Check cache
    if( s ) return s;           // Cache hit!
  }

  Node *phi_post;
  if( prior_n == new_false || prior_n == new_true ) {
    phi_post = def->clone();
    phi_post->set_req(0, prior_n );
    register_new_node(phi_post, prior_n);
  } else {
    // This method handles both control uses (looking for Regions) and data
    // uses (looking for Phis).  If looking for a control use, then we need
    // to insert a Region instead of a Phi; however Regions always exist
    // previously (the hash_find_insert below would always hit) so we can
    // return the existing Region.
    if( def->is_CFG() ) {
      phi_post = prior_n;       // If looking for CFG, return prior
    } else {
      assert( def->is_Phi(), "" );
      assert( prior_n->is_Region(), "must be a post-dominating merge point" );

      // Need a Phi here
      phi_post = PhiNode::make_blank(prior_n, def);
      // Search for both true and false on all paths till we find one.
      for( uint i = 1; i < phi_post->req(); i++ ) // For all paths
        phi_post->init_req( i, spinup( iff_dom, new_false, new_true, prior_n->in(i), def, cache ) );
      Node *t = _igvn.hash_find_insert(phi_post);
      if( t ) {                 // See if we already have this one
        // phi_post will not be used, so kill it
        _igvn.remove_dead_node(phi_post);
        phi_post->destruct(&_igvn);
        phi_post = t;
      } else {
        register_new_node( phi_post, prior_n );
      }
    }
  }

  // Update cache everywhere
  prior_n = (Node*)((intptr_t)0xdeadbeef);  // Reset IDOM walk
  n = use_blk;                  // Get path input
  // Spin-up the idom tree again, basically doing path-compression.
  // Insert cache entries along the way, so that if we ever hit this
  // point in the IDOM tree again we'll stop immediately on a cache hit.
  while( n != iff_dom ) {       // Found post-dominating point?
    prior_n = n;
    n = idom(n);                // Search higher
    cache->lru_insert( prior_n, phi_post ); // Fill cache
  } // End of while not gone high enough

  return phi_post;
}

//------------------------------find_use_block---------------------------------
// Find the block a USE is in.  Normally USE's are in the same block as the
// using instruction.  For Phi-USE's, the USE is in the predecessor block
// along the corresponding path.
Node *PhaseIdealLoop::find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true ) {
  // CFG uses are their own block
  if( use->is_CFG() )
    return use;

  if( use->is_Phi() ) {         // Phi uses in prior block
    // Grab the first Phi use; there may be many.
    // Each will be handled as a separate iteration of
    // the phi-use loop in do_split_if().
    uint j;
    for( j = 1; j < use->req(); j++ )
      if( use->in(j) == def )
        break;
    assert( j < use->req(), "def should be among use's inputs" );
    return use->in(0)->in(j);
  }
  // Normal (non-phi) use
  Node *use_blk = get_ctrl(use);
  // Some uses are directly attached to the old (and going away)
  // false and true branches.
  if( use_blk == old_false ) {
    use_blk = new_false;
    set_ctrl(use, new_false);
  }
  if( use_blk == old_true ) {
    use_blk = new_true;
    set_ctrl(use, new_true);
  }

  if (use_blk == nullptr) {        // He's dead, Jim
    _igvn.replace_node(use, C->top());
  }

  return use_blk;
}

//------------------------------handle_use-------------------------------------
// Handle uses of the merge point.  Basically, split-if makes the merge point
// go away so all uses of the merge point must go away as well.  Most block
// local uses have already been split-up, through the merge point.  Uses from
// far below the merge point can't always be split up (e.g., phi-uses are
// pinned) and it makes too much stuff live.  Instead we use a path-based
// solution to move uses down.
//
// If the use is along the pre-split-CFG true branch, then the new use will
// be from the post-split-CFG true merge point.  Vice-versa for the false
// path.  Some uses will be along both paths; then we sink the use to the
// post-dominating location; we may need to insert a Phi there.
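// Concretely (sketch): spinup() returns either the new_false/new_true copy of
// the def or a Phi at the post-dominating merge; handle_use() then re-points
// the single input edge of 'use' that referenced 'def' at that value.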
void PhaseIdealLoop::handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true ) {

  Node *use_blk = find_use_block(use,def,old_false,new_false,old_true,new_true);
  if( !use_blk ) return;        // He's dead, Jim

  // Walk up the dominator tree until I hit either the old IfFalse, the old
  // IfTrue or the old If.  Insert Phis where needed.
  Node *new_def = spinup( region_dom, new_false, new_true, use_blk, def, cache );

  // Found where this USE goes.  Re-point him.
  uint i;
  for( i = 0; i < use->req(); i++ )
    if( use->in(i) == def )
      break;
  assert( i < use->req(), "def should be among use's inputs" );
  _igvn.replace_input_of(use, i, new_def);
}

//------------------------------do_split_if------------------------------------
// Found an If getting its condition-code input from a Phi in the same block.
// Split thru the Region.
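// Sketch of the transform for a two-predecessor region:
//
//   Before:  in1  in2              After:  in1        in2
//              \  /                         |          |
//             region                      iff1       iff2    (clones of iff)
//               |                         /  \       /  \
//              iff                      F1    T1   F2    T2
//             /   \
//            F     T               new_false = Region(F1, F2)
//                                  new_true  = Region(T1, T2)
//
// iff's condition is Bool(Phi(c1, c2)); clone i tests c_i directly, so the
// Phi, the region, and the original iff all go dead.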
void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, RegionNode** new_true_region) {

  C->set_major_progress();
  RegionNode *region = iff->in(0)->as_Region();
  Node *region_dom = idom(region);

  // We are going to clone this test (and the control flow with it) up through
  // the incoming merge point.  We need to empty the current basic block.
  // Clone any instructions which must be in this block up through the merge
  // point.
  DUIterator i, j;
  bool progress = true;
  while (progress) {
    progress = false;
    for (i = region->outs(); region->has_out(i); i++) {
      Node* n = region->out(i);
      if( n == region ) continue;
      // The IF to be split is OK.
      if( n == iff ) continue;
      if( !n->is_Phi() ) {      // Found pinned memory op or such
        if (split_up(n, region, iff)) {
          i = region->refresh_out_pos(i);
          progress = true;
        }
        continue;
      }
      assert( n->in(0) == region, "" );

      // Recursively split up all users of a Phi
      for (j = n->outs(); n->has_out(j); j++) {
        Node* m = n->out(j);
        // If m is dead, throw it away, and declare progress
        if (_loop_or_ctrl[m->_idx] == nullptr) {
          _igvn.remove_dead_node(m);
          // fall through
        } else if (m != iff && split_up(m, region, iff)) {
          // fall through
        } else {
          continue;
        }
        // Something unpredictable changed.
        // Tell the iterators to refresh themselves, and rerun the loop.
        i = region->refresh_out_pos(i);
        j = region->refresh_out_pos(j);
        progress = true;
      }
    }
  }

  // Now we have no instructions in the block containing the IF.
  // Split the IF.
  RegionNode *new_iff = split_thru_region(iff, region);

  // Replace both uses of 'new_iff' with Regions merging True/False
  // paths.  This makes 'new_iff' go dead.
  Node *old_false = nullptr, *old_true = nullptr;
  RegionNode* new_false = nullptr;
  RegionNode* new_true = nullptr;
  for (DUIterator_Last j2min, j2 = iff->last_outs(j2min); j2 >= j2min; --j2) {
    Node *ifp = iff->last_out(j2);
    assert( ifp->Opcode() == Op_IfFalse || ifp->Opcode() == Op_IfTrue, "" );
    ifp->set_req(0, new_iff);
    RegionNode* ifpx = split_thru_region(ifp, region);

    // Replace 'If' projection of a Region with a Region of
    // 'If' projections.
    ifpx->set_req(0, ifpx);       // A TRUE RegionNode

    // Setup dominator info
    set_idom(ifpx, region_dom, dom_depth(region_dom) + 1);

    // Check for splitting loop tails
    if( get_loop(iff)->tail() == ifp )
      get_loop(iff)->_tail = ifpx;

    // Replace in the graph with lazy-update mechanism
    new_iff->set_req(0, new_iff); // hook self so it does not go dead
    lazy_replace(ifp, ifpx);
    new_iff->set_req(0, region);

    // Record bits for later xforms
    if( ifp->Opcode() == Op_IfFalse ) {
      old_false = ifp;
      new_false = ifpx;
    } else {
      old_true = ifp;
      new_true = ifpx;
    }
  }
  _igvn.remove_dead_node(new_iff);
  // Lazy replace IDOM info with the region's dominator
  lazy_replace(iff, region_dom);
  lazy_update(region, region_dom); // idom must be updated before handle_use
  region->set_req(0, nullptr);        // Break the self-cycle. Required for lazy_update to work on region

  // Now make the original merge point go dead, by handling all its uses.
  small_cache region_cache;
  // Preload some control flow in region-cache
  region_cache.lru_insert( new_false, new_false );
  region_cache.lru_insert( new_true , new_true  );
  // Now handle all uses of the splitting block
  for (DUIterator k = region->outs(); region->has_out(k); k++) {
    Node* phi = region->out(k);
    if (!phi->in(0)) {         // Dead phi?  Remove it
      _igvn.remove_dead_node(phi);
    } else if (phi == region) { // Found the self-reference
      continue;                 // No roll-back of DUIterator
    } else if (phi->is_Phi()) { // Expected common case: Phi hanging off of Region
      assert(phi->in(0) == region, "Inconsistent graph");
      // Need a per-def cache.  Phi represents a def, so make a cache
      small_cache phi_cache;

      // Inspect all Phi uses to make the Phi go dead
      for (DUIterator_Last lmin, l = phi->last_outs(lmin); l >= lmin; --l) {
        Node* use = phi->last_out(l);
        // Compute the new DEF for this USE.  New DEF depends on the path
        // taken from the original DEF to the USE.  The new DEF may be some
        // collection of PHI's merging values from different paths.  The Phis
        // inserted depend only on the location of the USE.  We use a
        // 2-element cache to handle multiple uses from the same block.
        handle_use(use, phi, &phi_cache, region_dom, new_false, new_true, old_false, old_true);
      } // End of while phi has uses
      // Remove the dead Phi
      _igvn.remove_dead_node( phi );
    } else {
      assert(phi->in(0) == region, "Inconsistent graph");
      // Random memory op guarded by Region.  Compute new DEF for USE.
      handle_use(phi, region, &region_cache, region_dom, new_false, new_true, old_false, old_true);
    }
    // Every path above deletes a use of the region, except for the region
    // self-cycle (which is needed by handle_use calling find_use_block
    // calling get_ctrl calling get_ctrl_no_update looking for dead
    // regions).  So roll back the DUIterator innards.
    --k;
  } // End of while merge point has phis

  _igvn.remove_dead_node(region);
  if (iff->Opcode() == Op_RangeCheck) {
    // Pin array access nodes: control is updated here to a region. If, after some transformations, only one path
    // into the region is left, an array load could become dependent on a condition that's not a range check for
    // that access. If that condition is replaced by an identical dominating one, then an unpinned load would risk
    // floating above its range check.
    pin_array_access_nodes_dependent_on(new_true);
    pin_array_access_nodes_dependent_on(new_false);
  }

  if (new_false_region != nullptr) {
    *new_false_region = new_false;
  }
  if (new_true_region != nullptr) {
    *new_true_region = new_true;
  }

  DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
}

void PhaseIdealLoop::pin_array_access_nodes_dependent_on(Node* ctrl) {
  for (DUIterator i = ctrl->outs(); ctrl->has_out(i); i++) {
    Node* use = ctrl->out(i);
    if (!use->depends_only_on_test()) {
      continue;
    }
    Node* pinned_clone = use->pin_array_access_node();
    if (pinned_clone != nullptr) {
      register_new_node_with_ctrl_of(pinned_clone, use);
      _igvn.replace_node(use, pinned_clone);
      --i;
    }
  }
}