1 /*
2 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "memory/allocation.inline.hpp"
26 #include "opto/addnode.hpp"
27 #include "opto/callnode.hpp"
28 #include "opto/inlinetypenode.hpp"
29 #include "opto/loopnode.hpp"
30 #include "opto/movenode.hpp"
31 #include "opto/node.hpp"
32 #include "opto/opaquenode.hpp"
33 #include "opto/predicates.hpp"
34
//------------------------------split_thru_region------------------------------
// Split Node 'n' through merge point. Makes one private clone of 'n' per
// non-control input path of 'region', rewiring each clone's control and any
// region-pinned data inputs (Phis) to the values flowing along that path.
// Returns a new RegionNode merging the clones; it is not a TRUE region,
// since its req(0) points back at 'region' rather than at itself.
RegionNode* PhaseIdealLoop::split_thru_region(Node* n, RegionNode* region) {
  assert(n->is_CFG(), "");
  RegionNode* r = new RegionNode(region->req());
  IdealLoopTree* loop = get_loop(n);
  // One clone of 'n' per control path merging at 'region'.
  for (uint i = 1; i < region->req(); i++) {
    Node* x = n->clone();
    Node* in0 = n->in(0);
    // If n's control input hangs off the region (e.g. a projection of it),
    // redirect the clone's control to the corresponding path input.
    if (in0->in(0) == region) x->set_req(0, in0->in(i));
    for (uint j = 1; j < n->req(); j++) {
      Node* in = n->in(j);
      // Data inputs pinned at the region are Phis; replace them with the
      // value they merge along path 'i'.
      if (get_ctrl(in) == region) {
        x->set_req(j, in->in(i));
      }
    }
    _igvn.register_new_node_with_optimizer(x);
    set_loop(x, loop);
    // Clone is dominated by its (possibly rewired) control input.
    set_idom(x, x->in(0), dom_depth(x->in(0))+1);
    r->init_req(i, x);
  }

  // Record region
  r->set_req(0,region);         // Not a TRUE RegionNode
  _igvn.register_new_node_with_optimizer(r);
  set_loop(r, loop);
  // Only innermost loops keep an explicit body list.
  if (!loop->_child) {
    loop->_body.push(r);
  }
  return r;
}
66
//------------------------------split_up---------------------------------------
// Split block-local op up through the phis to empty the current block.
// 'blk1' is the region being split and 'blk2' the If hanging off it; a node
// is "block local" when its control is one of those two. Returns true when
// the graph changed (node was cloned up, cloned down, or otherwise moved),
// in which case the caller must restart/refresh its iteration over uses.
bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) {
  if( n->is_CFG() ) {
    assert( n->in(0) != blk1, "Lousy candidate for split-if" );
    return false;
  }
  if (!at_relevant_ctrl(n, blk1, blk2))
    return false;               // Not block local
  if( n->is_Phi() ) return false; // Local PHIs are expected

  // Recursively split-up inputs
  for (uint i = 1; i < n->req(); i++) {
    if( split_up( n->in(i), blk1, blk2 ) ) {
      // Got split recursively and self went dead?
      if (n->outcnt() == 0)
        _igvn.remove_dead_node(n);
      return true;
    }
  }

  // Special shapes that must be cloned down rather than up:
  if (clone_cmp_loadklass_down(n, blk1, blk2)) {
    return true;
  }

  // Check for needing to clone-up a compare. Can't do that, it forces
  // another (nested) split-if transform. Instead, clone it "down".
  if (clone_cmp_down(n, blk1, blk2)) {
    return true;
  }

  // Template Assertion Expressions must stay intact; clone them down whole.
  clone_template_assertion_expression_down(n);

  if (n->Opcode() == Op_OpaqueZeroTripGuard) {
    // If this Opaque1 is part of the zero trip guard for a loop:
    // 1- it can't be shared
    // 2- the zero trip guard can't be the if that's being split
    // As a consequence, this node could be assigned control anywhere between its current control and the zero trip guard.
    // Move it down to get it out of the way of split if and avoid breaking the zero trip guard shape.
    Node* cmp = n->unique_out();
    assert(cmp->Opcode() == Op_CmpI, "bad zero trip guard shape");
    Node* bol = cmp->unique_out();
    assert(bol->Opcode() == Op_Bool, "bad zero trip guard shape");
    Node* iff = bol->unique_out();
    assert(iff->Opcode() == Op_If, "bad zero trip guard shape");
    set_ctrl(n, iff->in(0));
    set_ctrl(cmp, iff->in(0));
    set_ctrl(bol, iff->in(0));
    return true;
  }

  // See if splitting-up a Store. Any anti-dep loads must go up as
  // well. An anti-dep load might be in the wrong block, because in
  // this particular layout/schedule we ignored anti-deps and allow
  // memory to be alive twice. This only works if we do the same
  // operations on anti-dep loads as we do their killing stores.
  if( n->is_Store() && n->in(MemNode::Memory)->in(0) == n->in(0) ) {
    // Get store's memory slice
    int alias_idx = C->get_alias_index(_igvn.type(n->in(MemNode::Address))->is_ptr());

    // Get memory-phi anti-dep loads will be using
    Node *memphi = n->in(MemNode::Memory);
    assert( memphi->is_Phi(), "" );
    // Hoist any anti-dep load to the splitting block;
    // it will then "split-up".
    for (DUIterator_Fast imax,i = memphi->fast_outs(imax); i < imax; i++) {
      Node *load = memphi->fast_out(i);
      // Only loads on the same memory slice are anti-dependent.
      if( load->is_Load() && alias_idx == C->get_alias_index(_igvn.type(load->in(MemNode::Address))->is_ptr()) )
        set_ctrl(load,blk1);
    }
  }

  // ConvI2L may have type information on it which becomes invalid if
  // it moves up in the graph so change any clones so widen the type
  // to TypeLong::INT when pushing it up.
  const Type* rtype = nullptr;
  if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::INT) {
    rtype = TypeLong::INT;
  }

  // Now actually split-up this guy. One copy per control path merging.
  Node *phi = PhiNode::make_blank(blk1, n);
  for( uint j = 1; j < blk1->req(); j++ ) {
    Node *x = n->clone();
    // Widen the type of the ConvI2L when pushing up.
    if (rtype != nullptr) x->as_Type()->set_type(rtype);
    // If pinned at the region, repin the clone on the path's control.
    if( n->in(0) && n->in(0) == blk1 )
      x->set_req( 0, blk1->in(j) );
    for( uint i = 1; i < n->req(); i++ ) {
      Node *m = n->in(i);
      // Inputs controlled by the region must be Phis; take their path value.
      if( get_ctrl(m) == blk1 ) {
        assert( m->in(0) == blk1, "" );
        x->set_req( i, m->in(j) );
      }
    }
    register_new_node( x, blk1->in(j) );
    phi->init_req( j, x );
  }
  // Announce phi to optimizer
  register_new_node(phi, blk1);

  // Remove cloned-up value from optimizer; use phi instead
  _igvn.replace_node( n, phi );

  // (There used to be a self-recursive call to split_up() here,
  // but it is not needed. All necessary forward walking is done
  // by do_split_if() below.)

  return true;
}
177
// Look for a (If .. (Bool(CmpP (LoadKlass .. (AddP obj ..)) ..))) and clone all of it down.
// There's likely a CheckCastPP on one of the branches of the If, with obj as input.
// If the (LoadKlass .. (AddP obj ..)) is not cloned down, then split if transforms this to: (If .. (Bool(CmpP phi1 ..)))
// and the CheckCastPP to (CheckCastPP phi2). It's possible then that phi2 is transformed to a CheckCastPP
// (through PhiNode::Ideal) and that that CheckCastPP is replaced by another narrower CheckCastPP at the same control
// (through ConstraintCastNode::Identity). That could cause the CheckCastPP at the If to become top while (CmpP phi1)
// wouldn't constant fold because it's using a different data path. Cloning the whole subgraph down guarantees both the
// AddP and CheckCastPP have the same obj input after split if.
// Returns true iff 'n' (the AddP) went dead, meaning the caller need not split it up.
bool PhaseIdealLoop::clone_cmp_loadklass_down(Node* n, const Node* blk1, const Node* blk2) {
  if (n->Opcode() == Op_AddP && at_relevant_ctrl(n, blk1, blk2)) {
    Node_List cmp_nodes;
    // Remember the node-count watermark: nodes with _idx >= old were created
    // by the clone_cmp_down() calls below and still reference the original
    // LoadKlass chain; they get their own private copies afterwards.
    uint old = C->unique();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* u1 = n->fast_out(i);
      // Compressed-klass shape: AddP -> LoadNKlass -> DecodeNKlass -> Cmp
      if (u1->Opcode() == Op_LoadNKlass && at_relevant_ctrl(u1, blk1, blk2)) {
        for (DUIterator_Fast jmax, j = u1->fast_outs(jmax); j < jmax; j++) {
          Node* u2 = u1->fast_out(j);
          if (u2->Opcode() == Op_DecodeNKlass && at_relevant_ctrl(u2, blk1, blk2)) {
            // Use a plain DUIterator here: clone_cmp_down mutates the use list.
            for (DUIterator k = u2->outs(); u2->has_out(k); k++) {
              Node* u3 = u2->out(k);
              if (at_relevant_ctrl(u3, blk1, blk2) && clone_cmp_down(u3, blk1, blk2)) {
                --k;
              }
            }
            // Collect the freshly cloned Cmp nodes (those above the watermark).
            for (DUIterator_Fast kmax, k = u2->fast_outs(kmax); k < kmax; k++) {
              Node* u3 = u2->fast_out(k);
              if (u3->_idx >= old) {
                cmp_nodes.push(u3);
              }
            }
          }
        }
      } else if (u1->Opcode() == Op_LoadKlass && at_relevant_ctrl(u1, blk1, blk2)) {
        // Uncompressed shape: AddP -> LoadKlass -> Cmp
        for (DUIterator j = u1->outs(); u1->has_out(j); j++) {
          Node* u2 = u1->out(j);
          if (at_relevant_ctrl(u2, blk1, blk2) && clone_cmp_down(u2, blk1, blk2)) {
            --j;
          }
        }
        for (DUIterator_Fast kmax, k = u1->fast_outs(kmax); k < kmax; k++) {
          Node* u2 = u1->fast_out(k);
          if (u2->_idx >= old) {
            cmp_nodes.push(u2);
          }
        }
      }
    }

    // Give each cloned Cmp a private copy of the LoadKlass chain, for both of
    // its data inputs.
    for (uint i = 0; i < cmp_nodes.size(); ++i) {
      Node* cmp = cmp_nodes.at(i);
      clone_loadklass_nodes_at_cmp_index(n, cmp, 1);
      clone_loadklass_nodes_at_cmp_index(n, cmp, 2);
    }
    if (n->outcnt() == 0) {
      assert(n->is_dead(), "");
      return true;
    }
  }
  return false;
}
238
239 bool PhaseIdealLoop::at_relevant_ctrl(Node* n, const Node* blk1, const Node* blk2) {
240 return ctrl_or_self(n) == blk1 || ctrl_or_self(n) == blk2;
241 }
242
// Helper for clone_cmp_loadklass_down(): if input 'i' (1 or 2) of 'cmp' is a
// (DecodeNKlass (LoadNKlass (AddP n ..))) or a (LoadKlass (AddP n ..)) chain
// rooted at the AddP 'n', give 'cmp' a private clone of the whole chain at
// cmp's control, so the original shared chain can go dead.
void PhaseIdealLoop::clone_loadklass_nodes_at_cmp_index(const Node* n, Node* cmp, int i) {
  Node* decode = cmp->in(i);
  if (decode->Opcode() == Op_DecodeNKlass) {
    // Compressed-klass shape: clone DecodeNKlass, LoadNKlass, and AddP.
    Node* loadklass = decode->in(1);
    if (loadklass->Opcode() == Op_LoadNKlass) {
      Node* addp = loadklass->in(MemNode::Address);
      if (addp == n) {
        Node* ctrl = get_ctrl(cmp);
        Node* decode_clone = decode->clone();
        Node* loadklass_clone = loadklass->clone();
        Node* addp_clone = addp->clone();
        register_new_node(decode_clone, ctrl);
        register_new_node(loadklass_clone, ctrl);
        register_new_node(addp_clone, ctrl);
        // Rewire the private chain: cmp -> decode' -> loadklass' -> addp'.
        _igvn.replace_input_of(cmp, i, decode_clone);
        _igvn.replace_input_of(decode_clone, 1, loadklass_clone);
        _igvn.replace_input_of(loadklass_clone, MemNode::Address, addp_clone);
        // Original decode may now be unused; reclaim it.
        if (decode->outcnt() == 0) {
          _igvn.remove_dead_node(decode);
        }
      }
    }
  } else {
    // Uncompressed shape: clone LoadKlass and AddP only.
    Node* loadklass = cmp->in(i);
    if (loadklass->Opcode() == Op_LoadKlass) {
      Node* addp = loadklass->in(MemNode::Address);
      if (addp == n) {
        Node* ctrl = get_ctrl(cmp);
        Node* loadklass_clone = loadklass->clone();
        Node* addp_clone = addp->clone();
        register_new_node(loadklass_clone, ctrl);
        register_new_node(addp_clone, ctrl);
        // Rewire the private chain: cmp -> loadklass' -> addp'.
        _igvn.replace_input_of(cmp, i, loadklass_clone);
        _igvn.replace_input_of(loadklass_clone, MemNode::Address, addp_clone);
        if (loadklass->outcnt() == 0) {
          _igvn.remove_dead_node(loadklass);
        }
      }
    }
  }
}
284
// If 'n' is a CmpNode that cannot be cloned "up" through the region, clone it
// (and its dependent Bool/Opaque nodes) "down" to each use instead, creating a
// private copy per use. Returns true iff 'n' was consumed (removed), meaning
// the caller must refresh its iteration.
bool PhaseIdealLoop::clone_cmp_down(Node* n, const Node* blk1, const Node* blk2) {
  if( n->is_Cmp() ) {
    assert(get_ctrl(n) == blk2 || get_ctrl(n) == blk1, "must be in block with IF");
    // Check for simple Cmp/Bool/CMove which we can clone-up. Cmp/Bool/CMove
    // sequence can have no other users and it must all reside in the split-if
    // block. Non-simple Cmp/Bool/CMove sequences are 'cloned-down' below -
    // private, per-use versions of the Cmp and Bool are made. These sink to
    // the CMove block. If the CMove is in the split-if block, then in the
    // next iteration this will become a simple Cmp/Bool/CMove set to clone-up.
    Node *bol, *cmov;
    if (!(n->outcnt() == 1 && n->unique_out()->is_Bool() &&
          (bol = n->unique_out()->as_Bool()) &&
          (at_relevant_ctrl(bol, blk1, blk2) &&
           bol->outcnt() == 1 &&
           bol->unique_out()->is_CMove() &&
           (cmov = bol->unique_out()->as_CMove()) &&
           at_relevant_ctrl(cmov, blk1, blk2)))) {

      // Must clone down
      if (!n->is_FastLock()) {
        // Clone down any block-local BoolNode uses of this CmpNode
        for (DUIterator i = n->outs(); n->has_out(i); i++) {
          Node* bol = n->out(i);
          assert( bol->is_Bool(), "" );
          if (bol->outcnt() == 1) {
            Node* use = bol->unique_out();
            // A Bool feeding an Opaque predicate node which feeds an If that
            // is NOT in the split blocks needs no sinking; skip it.
            if (use->is_OpaqueNotNull() || use->is_OpaqueTemplateAssertionPredicate() ||
                use->is_OpaqueInitializedAssertionPredicate()) {
              if (use->outcnt() == 1) {
                Node* iff = use->unique_out();
                assert(iff->is_If(), "unexpected node type");
                Node *use_c = iff->in(0);
                if (use_c == blk1 || use_c == blk2) {
                  continue;
                }
              }
            } else {
              // We might see an Opaque1 from a loop limit check here
              assert(use->is_If() || use->is_CMove() || use->Opcode() == Op_Opaque1 || use->is_AllocateArray(), "unexpected node type");
              Node *use_c = (use->is_If() || use->is_AllocateArray()) ? use->in(0) : get_ctrl(use);
              if (use_c == blk1 || use_c == blk2) {
                assert(use->is_CMove(), "unexpected node type");
                continue;
              }
            }
          }
          if (at_relevant_ctrl(bol, blk1, blk2)) {
            // Recursively sink any BoolNode
            for (DUIterator j = bol->outs(); bol->has_out(j); j++) {
              Node* u = bol->out(j);
              // Uses are either IfNodes, CMoves, OpaqueNotNull, or Opaque*AssertionPredicate
              if (u->is_OpaqueNotNull() || u->is_OpaqueTemplateAssertionPredicate() ||
                  u->is_OpaqueInitializedAssertionPredicate()) {
                assert(u->in(1) == bol, "bad input");
                // Sink a private Bool+Opaque pair to each If using the Opaque.
                for (DUIterator_Last kmin, k = u->last_outs(kmin); k >= kmin; --k) {
                  Node* iff = u->last_out(k);
                  assert(iff->is_If() || iff->is_CMove(), "unexpected node type");
                  assert( iff->in(1) == u, "" );
                  // Get control block of either the CMove or the If input
                  Node *iff_ctrl = iff->is_If() ? iff->in(0) : get_ctrl(iff);
                  Node *x1 = bol->clone();
                  Node *x2 = u->clone();
                  register_new_node(x1, iff_ctrl);
                  register_new_node(x2, iff_ctrl);
                  _igvn.replace_input_of(x2, 1, x1);
                  _igvn.replace_input_of(iff, 1, x2);
                }
                _igvn.remove_dead_node(u);
                --j;
              } else {
                // We might see an Opaque1 from a loop limit check here
                assert(u->is_If() || u->is_CMove() || u->Opcode() == Op_Opaque1 || u->is_AllocateArray(), "unexpected node type");
                assert(u->is_AllocateArray() || u->in(1) == bol, "");
                assert(!u->is_AllocateArray() || u->in(AllocateNode::ValidLengthTest) == bol, "wrong input to AllocateArray");
                // Get control block of either the CMove or the If input
                Node *u_ctrl = (u->is_If() || u->is_AllocateArray()) ? u->in(0) : get_ctrl(u);
                assert((u_ctrl != blk1 && u_ctrl != blk2) || u->is_CMove(), "won't converge");
                Node *x = bol->clone();
                register_new_node(x, u_ctrl);
                _igvn.replace_input_of(u, u->is_AllocateArray() ? AllocateNode::ValidLengthTest : 1, x);
                --j;
              }
            }
            _igvn.remove_dead_node(bol);
            --i;
          }
        }
      }
      // Clone down this CmpNode
      for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; --j) {
        Node* use = n->last_out(j);
        uint pos = 1;
        // FastLock is consumed at a different input slot of its LockNode use.
        if (n->is_FastLock()) {
          pos = TypeFunc::Parms + 2;
          assert(use->is_Lock(), "FastLock only used by LockNode");
        }
        assert(use->in(pos) == n, "" );
        Node *x = n->clone();
        register_new_node(x, ctrl_or_self(use));
        _igvn.replace_input_of(use, pos, x);
      }
      _igvn.remove_dead_node(n);

      return true;
    }
  }
  return false;
}
393
// 'n' could be a node belonging to a Template Assertion Expression (i.e. any node between a Template Assertion Predicate
// and its OpaqueLoop* nodes (included)). We cannot simply split this node up since this would create a phi node inside
// the Template Assertion Expression - making it unrecognizable as such. Therefore, we completely clone the entire
// Template Assertion Expression "down". This ensures that we have an untouched copy that is still recognized by the
// Template Assertion Predicate matching code.
void PhaseIdealLoop::clone_template_assertion_expression_down(Node* node) {
  // Fast path: nothing to do if 'node' is not part of such an expression.
  if (!TemplateAssertionExpressionNode::is_in_expression(node)) {
    return;
  }

  TemplateAssertionExpressionNode template_assertion_expression_node(node);
  // For each Template Assertion Predicate If that uses 'node', clone the whole
  // expression at the predicate's control and swap in the fresh Opaque node.
  auto clone_expression = [&](IfNode* template_assertion_predicate) {
    OpaqueTemplateAssertionPredicateNode* opaque_node =
        template_assertion_predicate->in(1)->as_OpaqueTemplateAssertionPredicate();
    TemplateAssertionExpression template_assertion_expression(opaque_node, this);
    Node* new_control = template_assertion_predicate->in(0);
    OpaqueTemplateAssertionPredicateNode* cloned_opaque_node = template_assertion_expression.clone(new_control,
                                                                                                   opaque_node->loop_node());
    igvn().replace_input_of(template_assertion_predicate, 1, cloned_opaque_node);
  };
  template_assertion_expression_node.for_each_template_assertion_predicate(clone_expression);
}
416
417 //------------------------------register_new_node------------------------------
418 void PhaseIdealLoop::register_new_node( Node *n, Node *blk ) {
419 assert(!n->is_CFG(), "must be data node");
420 _igvn.register_new_node_with_optimizer(n);
421 set_ctrl(n, blk);
422 IdealLoopTree *loop = get_loop(blk);
423 if( !loop->_child )
424 loop->_body.push(n);
425 }
426
//------------------------------small_cache------------------------------------
// A tiny pointer-keyed map (built on Dict) from a use block to the def node
// (Phi or Region) that replaces the original def along paths through that
// block. Used by spinup() to path-compress repeated dominator-tree walks.
struct small_cache : public Dict {

  small_cache() : Dict( cmpkey, hashptr ) {}
  // Return the cached def for 'use_blk', or null if not present.
  Node *probe( Node *use_blk ) { return (Node*)((*this)[use_blk]); }
  // Record 'new_def' as the replacement def for paths through 'use_blk'.
  void lru_insert( Node *use_blk, Node *new_def ) { Insert(use_blk,new_def); }
};
434
//------------------------------spinup-----------------------------------------
// "Spin up" the dominator tree, starting at the use site and stopping when we
// find the post-dominating point.

// We must be at the merge point which post-dominates 'new_false' and
// 'new_true'. Figure out which edges into the RegionNode eventually lead up
// to false and which to true. Put in a PhiNode to merge values; plug in
// the appropriate false-arm or true-arm values. If some path leads to the
// original IF, then insert a Phi recursively.
//
// Returns the new def for 'def' as seen from 'use_blk'. Results are memoized
// in 'cache' (keyed by block), so repeated walks terminate quickly.
Node *PhaseIdealLoop::spinup( Node *iff_dom, Node *new_false, Node *new_true, Node *use_blk, Node *def, small_cache *cache ) {
  if (use_blk->is_top())        // Handle dead uses
    return use_blk;
  Node *prior_n = (Node*)((intptr_t)0xdeadbeef); // Sentinel: trips fast if read before set
  Node *n = use_blk;            // Get path input
  assert( use_blk != iff_dom, "" );
  // Here's the "spinup" the dominator tree loop. Do a cache-check
  // along the way, in case we've come this way before.
  while( n != iff_dom ) {       // Found post-dominating point?
    prior_n = n;
    n = idom(n);                // Search higher
    Node *s = cache->probe( prior_n ); // Check cache
    if( s ) return s;           // Cache hit!
  }

  Node *phi_post;
  if( prior_n == new_false || prior_n == new_true ) {
    // The walk entered iff_dom directly through one of the new projections'
    // regions: clone the def (Phi/Region) onto that arm.
    phi_post = def->clone();
    phi_post->set_req(0, prior_n );
    register_new_node(phi_post, prior_n);
  } else {
    // This method handles both control uses (looking for Regions) or data
    // uses (looking for Phis). If looking for a control use, then we need
    // to insert a Region instead of a Phi; however Regions always exist
    // previously (the hash_find_insert below would always hit) so we can
    // return the existing Region.
    if( def->is_CFG() ) {
      phi_post = prior_n;       // If looking for CFG, return prior
    } else {
      assert( def->is_Phi(), "" );
      assert( prior_n->is_Region(), "must be a post-dominating merge point" );

      // Need a Phi here
      phi_post = PhiNode::make_blank(prior_n, def);
      // Search for both true and false on all paths till find one.
      for( uint i = 1; i < phi_post->req(); i++ ) // For all paths
        phi_post->init_req( i, spinup( iff_dom, new_false, new_true, prior_n->in(i), def, cache ) );
      Node *t = _igvn.hash_find_insert(phi_post);
      if( t ) {                 // See if we already have this one
        // phi_post will not be used, so kill it
        _igvn.remove_dead_node(phi_post);
        phi_post->destruct(&_igvn);
        phi_post = t;
      } else {
        register_new_node( phi_post, prior_n );
      }
    }
  }

  // Update cache everywhere
  prior_n = (Node*)((intptr_t)0xdeadbeef); // Reset IDOM walk
  n = use_blk;                  // Get path input
  // Spin-up the idom tree again, basically doing path-compression.
  // Insert cache entries along the way, so that if we ever hit this
  // point in the IDOM tree again we'll stop immediately on a cache hit.
  while( n != iff_dom ) {       // Found post-dominating point?
    prior_n = n;
    n = idom(n);                // Search higher
    cache->lru_insert( prior_n, phi_post ); // Fill cache
  }                             // End of while not gone high enough

  return phi_post;
}
507
//------------------------------find_use_block---------------------------------
// Find the block a USE is in. Normally USE's are in the same block as the
// using instruction. For Phi-USE's, the USE is in the predecessor block
// along the corresponding path. Returns null after replacing a dead use
// with top.
Node *PhaseIdealLoop::find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true ) {
  // CFG uses are their own block
  if( use->is_CFG() )
    return use;

  if( use->is_Phi() ) {         // Phi uses in prior block
    // Grab the first Phi use; there may be many.
    // Each will be handled as a separate iteration of
    // the "while( phi->outcnt() )" loop.
    uint j;
    for( j = 1; j < use->req(); j++ )
      if( use->in(j) == def )
        break;
    assert( j < use->req(), "def should be among use's inputs" );
    // The use's block is the region predecessor along path j.
    return use->in(0)->in(j);
  }
  // Normal (non-phi) use
  Node *use_blk = get_ctrl(use);
  // Some uses are directly attached to the old (and going away)
  // false and true branches.
  if( use_blk == old_false ) {
    use_blk = new_false;
    set_ctrl(use, new_false);
  }
  if( use_blk == old_true ) {
    use_blk = new_true;
    set_ctrl(use, new_true);
  }

  if (use_blk == nullptr) {     // He's dead, Jim
    _igvn.replace_node(use, C->top());
  }

  return use_blk;
}
547
548 //------------------------------handle_use-------------------------------------
549 // Handle uses of the merge point. Basically, split-if makes the merge point
550 // go away so all uses of the merge point must go away as well. Most block
551 // local uses have already been split-up, through the merge point. Uses from
552 // far below the merge point can't always be split up (e.g., phi-uses are
553 // pinned) and it makes too much stuff live. Instead we use a path-based
554 // solution to move uses down.
555 //
556 // If the use is along the pre-split-CFG true branch, then the new use will
557 // be from the post-split-CFG true merge point. Vice-versa for the false
558 // path. Some uses will be along both paths; then we sink the use to the
559 // post-dominating location; we may need to insert a Phi there.
560 void PhaseIdealLoop::handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true ) {
561
562 Node *use_blk = find_use_block(use,def,old_false,new_false,old_true,new_true);
563 if( !use_blk ) return; // He's dead, Jim
564
565 // Walk up the dominator tree until I hit either the old IfFalse, the old
566 // IfTrue or the old If. Insert Phis where needed.
567 Node *new_def = spinup( region_dom, new_false, new_true, use_blk, def, cache );
568
569 // Found where this USE goes. Re-point him.
570 uint i;
571 for( i = 0; i < use->req(); i++ )
572 if( use->in(i) == def )
573 break;
574 assert( i < use->req(), "def should be among use's inputs" );
575 _igvn.replace_input_of(use, i, new_def);
576 }
577
578 //------------------------------do_split_if------------------------------------
579 // Found an If getting its condition-code input from a Phi in the same block.
580 // Split thru the Region.
581 void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, RegionNode** new_true_region) {
582
583 C->set_major_progress();
584 RegionNode *region = iff->in(0)->as_Region();
585 Node *region_dom = idom(region);
586
587 // We are going to clone this test (and the control flow with it) up through
588 // the incoming merge point. We need to empty the current basic block.
589 // Clone any instructions which must be in this block up through the merge
590 // point.
591 DUIterator i, j;
592 bool progress = true;
593 while (progress) {
594 progress = false;
595 for (i = region->outs(); region->has_out(i); i++) {
596 Node* n = region->out(i);
597 if( n == region ) continue;
598 // The IF to be split is OK.
599 if( n == iff ) continue;
600 if( !n->is_Phi() ) { // Found pinned memory op or such
601 if (split_up(n, region, iff)) {
602 i = region->refresh_out_pos(i);
603 progress = true;
604 }
605 continue;
606 }
607 assert( n->in(0) == region, "" );
608
609 // Recursively split up all users of a Phi
610 for (j = n->outs(); n->has_out(j); j++) {
611 Node* m = n->out(j);
612 // If m is dead, throw it away, and declare progress
613 if (_loop_or_ctrl[m->_idx] == nullptr) {
614 _igvn.remove_dead_node(m);
615 // fall through
616 } else if (m != iff && split_up(m, region, iff)) {
617 // fall through
618 } else {
619 continue;
620 }
621 // Something unpredictable changed.
622 // Tell the iterators to refresh themselves, and rerun the loop.
623 i = region->refresh_out_pos(i);
624 j = region->refresh_out_pos(j);
625 progress = true;
626 }
627 }
628 }
629
630 // Now we have no instructions in the block containing the IF.
631 // Split the IF.
632 RegionNode *new_iff = split_thru_region(iff, region);
633
634 // Replace both uses of 'new_iff' with Regions merging True/False
635 // paths. This makes 'new_iff' go dead.
636 Node *old_false = nullptr, *old_true = nullptr;
637 RegionNode* new_false = nullptr;
638 RegionNode* new_true = nullptr;
639 for (DUIterator_Last j2min, j2 = iff->last_outs(j2min); j2 >= j2min; --j2) {
640 Node *ifp = iff->last_out(j2);
641 assert( ifp->Opcode() == Op_IfFalse || ifp->Opcode() == Op_IfTrue, "" );
642 ifp->set_req(0, new_iff);
643 RegionNode* ifpx = split_thru_region(ifp, region);
644
645 // Replace 'If' projection of a Region with a Region of
646 // 'If' projections.
647 ifpx->set_req(0, ifpx); // A TRUE RegionNode
648
649 // Setup dominator info
650 set_idom(ifpx, region_dom, dom_depth(region_dom) + 1);
651
652 // Check for splitting loop tails
653 if( get_loop(iff)->tail() == ifp )
654 get_loop(iff)->_tail = ifpx;
655
656 // Replace in the graph with lazy-update mechanism
657 new_iff->set_req(0, new_iff); // hook self so it does not go dead
658 lazy_replace(ifp, ifpx);
659 new_iff->set_req(0, region);
660
661 // Record bits for later xforms
662 if( ifp->Opcode() == Op_IfFalse ) {
663 old_false = ifp;
664 new_false = ifpx;
665 } else {
666 old_true = ifp;
667 new_true = ifpx;
668 }
669 }
670 _igvn.remove_dead_node(new_iff);
671 // Lazy replace IDOM info with the region's dominator
672 lazy_replace(iff, region_dom);
673 lazy_update(region, region_dom); // idom must be update before handle_uses
674 region->set_req(0, nullptr); // Break the self-cycle. Required for lazy_update to work on region
675
676 // Now make the original merge point go dead, by handling all its uses.
677 small_cache region_cache;
678 // Preload some control flow in region-cache
679 region_cache.lru_insert( new_false, new_false );
680 region_cache.lru_insert( new_true , new_true );
681 // Now handle all uses of the splitting block
682 for (DUIterator k = region->outs(); region->has_out(k); k++) {
683 Node* phi = region->out(k);
684 if (!phi->in(0)) { // Dead phi? Remove it
685 _igvn.remove_dead_node(phi);
686 } else if (phi == region) { // Found the self-reference
687 continue; // No roll-back of DUIterator
688 } else if (phi->is_Phi()) { // Expected common case: Phi hanging off of Region
689 assert(phi->in(0) == region, "Inconsistent graph");
690 // Need a per-def cache. Phi represents a def, so make a cache
691 small_cache phi_cache;
692
693 // Inspect all Phi uses to make the Phi go dead
694 for (DUIterator_Last lmin, l = phi->last_outs(lmin); l >= lmin; --l) {
695 Node* use = phi->last_out(l);
696 // Compute the new DEF for this USE. New DEF depends on the path
697 // taken from the original DEF to the USE. The new DEF may be some
698 // collection of PHI's merging values from different paths. The Phis
699 // inserted depend only on the location of the USE. We use a
700 // 2-element cache to handle multiple uses from the same block.
701 handle_use(use, phi, &phi_cache, region_dom, new_false, new_true, old_false, old_true);
702 } // End of while phi has uses
703 // Remove the dead Phi
704 _igvn.remove_dead_node( phi );
705 } else {
706 assert(phi->in(0) == region, "Inconsistent graph");
707 // Random memory op guarded by Region. Compute new DEF for USE.
708 handle_use(phi, region, ®ion_cache, region_dom, new_false, new_true, old_false, old_true);
709 }
710 // Every path above deletes a use of the region, except for the region
711 // self-cycle (which is needed by handle_use calling find_use_block
712 // calling get_ctrl calling get_ctrl_no_update looking for dead
713 // regions). So roll back the DUIterator innards.
714 --k;
715 } // End of while merge point has phis
716
717 _igvn.remove_dead_node(region);
718 if (iff->Opcode() == Op_RangeCheck) {
719 // Pin array access nodes: control is updated here to a region. If, after some transformations, only one path
720 // into the region is left, an array load could become dependent on a condition that's not a range check for
721 // that access. If that condition is replaced by an identical dominating one, then an unpinned load would risk
722 // floating above its range check.
723 pin_array_access_nodes_dependent_on(new_true);
724 pin_array_access_nodes_dependent_on(new_false);
725 }
726
727 if (new_false_region != nullptr) {
728 *new_false_region = new_false;
729 }
730 if (new_true_region != nullptr) {
731 *new_true_region = new_true;
732 }
733
734 DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
735 }
736
737 void PhaseIdealLoop::pin_array_access_nodes_dependent_on(Node* ctrl) {
738 for (DUIterator i = ctrl->outs(); ctrl->has_out(i); i++) {
739 Node* use = ctrl->out(i);
740 if (!use->depends_only_on_test()) {
741 continue;
742 }
743 Node* pinned_clone = use->pin_array_access_node();
744 if (pinned_clone != nullptr) {
745 register_new_node_with_ctrl_of(pinned_clone, use);
746 _igvn.replace_node(use, pinned_clone);
747 --i;
748 }
749 }
750 }