/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/matcher.hpp"
#include "opto/mulnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp"
#endif

//=============================================================================
//------------------------------split_thru_phi---------------------------------
// Split Node 'n' through merge point if there is enough win.
Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
  if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
    // ConvI2L may have type information on it which is unsafe to push up
    // so disable this for now
    return NULL;
  }

  // Splitting range check CastIIs through a loop induction Phi can
  // cause new Phis to be created that are left unrelated to the loop
  // induction Phi and prevent optimizations (vectorization)
  if (n->Opcode() == Op_CastII && n->as_CastII()->has_range_check() &&
      region->is_CountedLoop() && n->in(1) == region->as_CountedLoop()->phi()) {
    return NULL;
  }

  int wins = 0;
  assert(!n->is_CFG(), "");
  assert(region->is_Region(), "");

  const Type* type = n->bottom_type();
  const TypeOopPtr *t_oop = _igvn.type(n)->isa_oopptr();
  Node *phi;
  if (t_oop != NULL && t_oop->is_known_instance_field()) {
    int iid    = t_oop->instance_id();
    int index  = C->get_alias_index(t_oop);
    int offset = t_oop->offset();
    phi = new (C) PhiNode(region, type, NULL, iid, index, offset);
  } else {
    phi = PhiNode::make_blank(region, n);
  }
  uint old_unique = C->unique();
  for (uint i = 1; i < region->req(); i++) {
    Node *x;
    Node* the_clone = NULL;
    if (region->in(i) == C->top()) {
      x = C->top();             // Dead path?  Use a dead data op
    } else {
      x = n->clone();           // Else clone up the data op
      the_clone = x;            // Remember for possible deletion.
      // Alter data node to use pre-phi inputs
      if (n->in(0) == region)
        x->set_req( 0, region->in(i) );
      for (uint j = 1; j < n->req(); j++) {
        Node *in = n->in(j);
        if (in->is_Phi() && in->in(0) == region)
          x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone
      }
    }
    // Check for a 'win' on some paths
    const Type *t = x->Value(&_igvn);

    bool singleton = t->singleton();

    // A TOP singleton indicates that there are no possible values incoming
    // along a particular edge. In most cases, this is OK, and the Phi will
    // be eliminated later in an Ideal call. However, we can't allow this to
    // happen if the singleton occurs on loop entry, as the elimination of
    // the PhiNode may cause the resulting node to migrate back to a previous
    // loop iteration.
    if (singleton && t == Type::TOP) {
      // Is_Loop() == false does not confirm the absence of a loop (e.g., an
      // irreducible loop may not be indicated by an affirmative is_Loop());
      // therefore, the only top we can split thru a phi is on a backedge of
      // a loop.
      singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
    }

    if (singleton) {
      wins++;
      x = ((PhaseGVN&)_igvn).makecon(t);
    } else {
      // We now call Identity to try to simplify the cloned node.
      // Note that some Identity methods call phase->type(this).
      // Make sure that the type array is big enough for
      // our new node, even though we may throw the node away.
      // (Note: This tweaking with igvn only works because x is a new node.)
      _igvn.set_type(x, t);
      // If x is a TypeNode, capture any more-precise type permanently into Node
      // otherwise it will not be updated during igvn->transform since
      // igvn->type(x) is set to x->Value() already.
      x->raise_bottom_type(t);
      if (x->Opcode() != Op_ShenandoahLoadReferenceBarrier) {
        Node *y = x->Identity(&_igvn);
        if (y != x) {
          wins++;
          x = y;
        } else {
          y = _igvn.hash_find(x);
          if (y) {
            wins++;
            x = y;
          } else {
            // Else x is a new node we are keeping
            // We do not need register_new_node_with_optimizer
            // because set_type has already been called.
            _igvn._worklist.push(x);
          }
        }
      } else {
        _igvn._worklist.push(x);
      }
    }
    if (x != the_clone && the_clone != NULL)
      _igvn.remove_dead_node(the_clone);
    phi->set_req( i, x );
  }
  // Too few wins?
  if (wins <= policy) {
    _igvn.remove_dead_node(phi);
    return NULL;
  }

  // Record Phi
  register_new_node( phi, region );

  for (uint i2 = 1; i2 < phi->req(); i2++) {
    Node *x = phi->in(i2);
    // If we commoned up the cloned 'x' with another existing Node,
    // the existing Node picks up a new use. We need to make the
    // existing Node occur higher up so it dominates its uses.
    Node *old_ctrl;
    IdealLoopTree *old_loop;

    if (x->is_Con()) {
      // Constant's control is always root.
      set_ctrl(x, C->root());
      continue;
    }
    // The occasional new node
    if (x->_idx >= old_unique) {    // Found a new, unplaced node?
      old_ctrl = NULL;
      old_loop = NULL;              // Not in any prior loop
    } else {
      old_ctrl = get_ctrl(x);
      old_loop = get_loop(old_ctrl); // Get prior loop
    }
    // New late point must dominate new use
    Node *new_ctrl = dom_lca(old_ctrl, region->in(i2));
    if (new_ctrl == old_ctrl) // Nothing is changed
      continue;

    IdealLoopTree *new_loop = get_loop(new_ctrl);

    // Don't move x into a loop if its uses are
    // outside of loop. Otherwise x will be cloned
    // for each use outside of this loop.
    IdealLoopTree *use_loop = get_loop(region);
    if (!new_loop->is_member(use_loop) &&
        (old_loop == NULL || !new_loop->is_member(old_loop))) {
      // Take early control, later control will be recalculated
      // during next iteration of loop optimizations.
      new_ctrl = get_early_ctrl(x);
      new_loop = get_loop(new_ctrl);
    }
    // Set new location
    set_ctrl(x, new_ctrl);
    // If changing loop bodies, see if we need to collect into new body
    if (old_loop != new_loop) {
      if (old_loop && !old_loop->_child)
        old_loop->_body.yank(x);
      if (!new_loop->_child)
        new_loop->_body.push(x);  // Collect body info
    }
  }

  return phi;
}

//------------------------------dominated_by------------------------------------
// Replace the dominated test with an obvious true or false. Place it on the
// IGVN worklist for later cleanup. Move control-dependent data Nodes on the
// live path up to the dominating control.
void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip, bool exclude_loop_predicate ) {
#ifndef PRODUCT
  if (VerifyLoopOptimizations && PrintOpto) tty->print_cr("dominating test");
#endif


  // prevdom is the dominating projection of the dominating test.
  assert( iff->is_If(), "" );
  assert( iff->Opcode() == Op_If || iff->Opcode() == Op_CountedLoopEnd, "Check this code when new subtype is added");
  int pop = prevdom->Opcode();
  assert( pop == Op_IfFalse || pop == Op_IfTrue, "" );
  if (flip) {
    if (pop == Op_IfTrue)
      pop = Op_IfFalse;
    else
      pop = Op_IfTrue;
  }
  // 'con' is set to true or false to kill the dominated test.
  Node *con = _igvn.makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
  set_ctrl(con, C->root()); // Constant gets a new use
  // Hack the dominated test
  _igvn.replace_input_of(iff, 1, con);

  // If I don't have a reachable TRUE and FALSE path following the IfNode then
  // I can assume this path reaches an infinite loop. In this case it's not
  // important to optimize the data Nodes - either the whole compilation will
  // be tossed or this path (and all data Nodes) will go dead.
  if (iff->outcnt() != 2) return;

  // Make control-dependent data Nodes on the live path (path that will remain
  // once the dominated IF is removed) become control-dependent on the
  // dominating projection.
  Node* dp = iff->as_If()->proj_out(pop == Op_IfTrue);

  // Loop predicates may have depending checks which should not
  // be skipped. For example, range check predicate has two checks
  // for lower and upper bounds.
  if (dp == NULL)
    return;

  ProjNode* dp_proj  = dp->as_Proj();
  ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj();
  if (exclude_loop_predicate &&
      (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) ||
       unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check))) {
    // If this is a range check (IfNode::is_range_check), do not
    // reorder because Compile::allow_range_check_smearing might have
    // changed the check.
    return; // Let IGVN transformation change control dependence.
  }

  IdealLoopTree *old_loop = get_loop(dp);

  for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
    Node* cd = dp->fast_out(i); // Control-dependent node
    if (cd->depends_only_on_test()) {
      assert(cd->in(0) == dp, "");
      _igvn.replace_input_of(cd, 0, prevdom);
      set_early_ctrl(cd);
      IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
      if (old_loop != new_loop) {
        if (!old_loop->_child) old_loop->_body.yank(cd);
        if (!new_loop->_child) new_loop->_body.push(cd);
      }
      --i;
      --imax;
    }
  }
}

//------------------------------has_local_phi_input----------------------------
// Return TRUE if 'n' has Phi inputs from its local block and no other
// block-local inputs (all non-local-phi inputs come from earlier blocks)
Node *PhaseIdealLoop::has_local_phi_input( Node *n ) {
  Node *n_ctrl = get_ctrl(n);
  // See if some inputs come from a Phi in this block, or from before
  // this block.
  uint i;
  for( i = 1; i < n->req(); i++ ) {
    Node *phi = n->in(i);
    if( phi->is_Phi() && phi->in(0) == n_ctrl )
      break;
  }
  if( i >= n->req() )
    return NULL;                // No Phi inputs; nowhere to clone thru

  // Check for inputs created between 'n' and the Phi input. These
  // must split as well; they have already been given the chance
  // (courtesy of a post-order visit) and since they did not we must
  // recover the 'cost' of splitting them by being very profitable
  // when splitting 'n'. Since this is unlikely we simply give up.
  for( i = 1; i < n->req(); i++ ) {
    Node *m = n->in(i);
    if( get_ctrl(m) == n_ctrl && !m->is_Phi() ) {
      // We allow the special case of AddP's with no local inputs.
      // This allows us to split-up address expressions.
      if (m->is_AddP() &&
          get_ctrl(m->in(2)) != n_ctrl &&
          get_ctrl(m->in(3)) != n_ctrl) {
        // Move the AddP up to dominating point
        Node* c = find_non_split_ctrl(idom(n_ctrl));
        set_ctrl_and_loop(m, c);
        continue;
      }
      return NULL;
    }
    assert(n->is_Phi() || m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control");
  }

  return n_ctrl;
}

//------------------------------remix_address_expressions----------------------
// Rework addressing expressions to get the most loop-invariant stuff
// moved out. We'd like to do all associative operators, but it's especially
// important (common) to do address expressions.
Node *PhaseIdealLoop::remix_address_expressions( Node *n ) {
  if (!has_ctrl(n)) return NULL;
  Node *n_ctrl = get_ctrl(n);
  IdealLoopTree *n_loop = get_loop(n_ctrl);

  // See if 'n' mixes loop-varying and loop-invariant inputs and
  // itself is loop-varying.
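  // (Illustrative sketch of the rewrites below: with 'inv' loop-invariant and
  //  'i' loop-varying, ((inv + i) << 2) becomes ((inv << 2) + (i << 2)) so the
  //  invariant shift can hoist, and AddP chains are re-associated so the
  //  invariant offsets can be computed once in the loop preheader.)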

  // Only interested in binary ops (and AddP)
  if( n->req() < 3 || n->req() > 4 ) return NULL;

  Node *n1_ctrl = get_ctrl(n->in( 1));
  Node *n2_ctrl = get_ctrl(n->in( 2));
  Node *n3_ctrl = get_ctrl(n->in(n->req() == 3 ? 2 : 3));
  IdealLoopTree *n1_loop = get_loop( n1_ctrl );
  IdealLoopTree *n2_loop = get_loop( n2_ctrl );
  IdealLoopTree *n3_loop = get_loop( n3_ctrl );

  // Does one of my inputs spin in a tighter loop than self?
  if( (n_loop->is_member( n1_loop ) && n_loop != n1_loop) ||
      (n_loop->is_member( n2_loop ) && n_loop != n2_loop) ||
      (n_loop->is_member( n3_loop ) && n_loop != n3_loop) )
    return NULL;                // Leave well enough alone

  // Is at least one of my inputs loop-invariant?
  if( n1_loop == n_loop &&
      n2_loop == n_loop &&
      n3_loop == n_loop )
    return NULL;                // No loop-invariant inputs


  int n_op = n->Opcode();

  // Replace expressions like ((V+I) << 2) with (V<<2 + I<<2).
  if( n_op == Op_LShiftI ) {
    // Scale is loop invariant
    Node *scale = n->in(2);
    Node *scale_ctrl = get_ctrl(scale);
    IdealLoopTree *scale_loop = get_loop(scale_ctrl );
    if( n_loop == scale_loop || !scale_loop->is_member( n_loop ) )
      return NULL;
    const TypeInt *scale_t = scale->bottom_type()->isa_int();
    if( scale_t && scale_t->is_con() && scale_t->get_con() >= 16 )
      return NULL;              // Don't bother with byte/short masking
    // Add must vary with loop (else shift would be loop-invariant)
    Node *add = n->in(1);
    Node *add_ctrl = get_ctrl(add);
    IdealLoopTree *add_loop = get_loop(add_ctrl);
    //assert( n_loop == add_loop, "" );
    if( n_loop != add_loop ) return NULL;  // happens w/ evil ZKM loops

    // Convert I-V into I+ (0-V); same for V-I
    if( add->Opcode() == Op_SubI &&
        _igvn.type( add->in(1) ) != TypeInt::ZERO ) {
      Node *zero = _igvn.intcon(0);
      set_ctrl(zero, C->root());
      Node *neg = new (C) SubINode( _igvn.intcon(0), add->in(2) );
      register_new_node( neg, get_ctrl(add->in(2) ) );
      add = new (C) AddINode( add->in(1), neg );
      register_new_node( add, add_ctrl );
    }
    if( add->Opcode() != Op_AddI ) return NULL;
    // See if one add input is loop invariant
    Node *add_var = add->in(1);
    Node *add_var_ctrl = get_ctrl(add_var);
    IdealLoopTree *add_var_loop = get_loop(add_var_ctrl );
    Node *add_invar = add->in(2);
    Node *add_invar_ctrl = get_ctrl(add_invar);
    IdealLoopTree *add_invar_loop = get_loop(add_invar_ctrl );
    if( add_var_loop == n_loop ) {
    } else if( add_invar_loop == n_loop ) {
      // Swap to find the invariant part
      add_invar = add_var;
      add_invar_ctrl = add_var_ctrl;
      add_invar_loop = add_var_loop;
      add_var = add->in(2);
      Node *add_var_ctrl = get_ctrl(add_var);
      IdealLoopTree *add_var_loop = get_loop(add_var_ctrl );
    } else                      // Else neither input is loop invariant
      return NULL;
    if( n_loop == add_invar_loop || !add_invar_loop->is_member( n_loop ) )
      return NULL;              // No invariant part of the add?

    // Yes! Reshape address expression!
    Node *inv_scale = new (C) LShiftINode( add_invar, scale );
    Node *inv_scale_ctrl =
      dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
      add_invar_ctrl : scale_ctrl;
    register_new_node( inv_scale, inv_scale_ctrl );
    Node *var_scale = new (C) LShiftINode( add_var, scale );
    register_new_node( var_scale, n_ctrl );
    Node *var_add = new (C) AddINode( var_scale, inv_scale );
    register_new_node( var_add, n_ctrl );
    _igvn.replace_node( n, var_add );
    return var_add;
  }

  // Replace (I+V) with (V+I)
  if( n_op == Op_AddI ||
      n_op == Op_AddL ||
      n_op == Op_AddF ||
      n_op == Op_AddD ||
      n_op == Op_MulI ||
      n_op == Op_MulL ||
      n_op == Op_MulF ||
      n_op == Op_MulD ) {
    if( n2_loop == n_loop ) {
      assert( n1_loop != n_loop, "" );
      n->swap_edges(1, 2);
    }
  }

  // Replace ((I1 +p V) +p I2) with ((I1 +p I2) +p V),
  // but not if I2 is a constant.
  if( n_op == Op_AddP ) {
    if( n2_loop == n_loop && n3_loop != n_loop ) {
      if( n->in(2)->Opcode() == Op_AddP && !n->in(3)->is_Con() ) {
        Node *n22_ctrl = get_ctrl(n->in(2)->in(2));
        Node *n23_ctrl = get_ctrl(n->in(2)->in(3));
        IdealLoopTree *n22loop = get_loop( n22_ctrl );
        IdealLoopTree *n23_loop = get_loop( n23_ctrl );
        if( n22loop != n_loop && n22loop->is_member(n_loop) &&
            n23_loop == n_loop ) {
          Node *add1 = new (C) AddPNode( n->in(1), n->in(2)->in(2), n->in(3) );
          // Stuff new AddP in the loop preheader
          register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) );
          Node *add2 = new (C) AddPNode( n->in(1), add1, n->in(2)->in(3) );
          register_new_node( add2, n_ctrl );
          _igvn.replace_node( n, add2 );
          return add2;
        }
      }
    }

    // Replace (I1 +p (I2 + V)) with ((I1 +p I2) +p V)
    if( n2_loop != n_loop && n3_loop == n_loop ) {
      if( n->in(3)->Opcode() == Op_AddI ) {
        Node *V = n->in(3)->in(1);
        Node *I = n->in(3)->in(2);
        if( is_member(n_loop,get_ctrl(V)) ) {
        } else {
          Node *tmp = V; V = I; I = tmp;
        }
        if( !is_member(n_loop,get_ctrl(I)) ) {
          Node *add1 = new (C) AddPNode( n->in(1), n->in(2), I );
          // Stuff new AddP in the loop preheader
          register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) );
          Node *add2 = new (C) AddPNode( n->in(1), add1, V );
          register_new_node( add2, n_ctrl );
          _igvn.replace_node( n, add2 );
          return add2;
        }
      }
    }
  }

  return NULL;
}

//------------------------------conditional_move-------------------------------
// Attempt to replace a Phi with a conditional move. We have some pretty
// strict profitability requirements. All Phis at the merge point must
// be converted, so we can remove the control flow. We need to limit the
// number of c-moves to a small handful. All code that was in the side-arms
// of the CFG diamond is now speculatively executed. This code has to be
// "cheap enough". We are pretty much limited to CFG diamonds that merge
// 1 or 2 items with a total of 1 or 2 ops executed speculatively.
Node *PhaseIdealLoop::conditional_move( Node *region ) {

  assert(region->is_Region(), "sanity check");
  if (region->req() != 3) return NULL;

  // Check for CFG diamond
  Node *lp = region->in(1);
  Node *rp = region->in(2);
  if (!lp || !rp) return NULL;
  Node *lp_c = lp->in(0);
  if (lp_c == NULL || lp_c != rp->in(0) || !lp_c->is_If()) return NULL;
  IfNode *iff = lp_c->as_If();

  // Check for ops pinned in an arm of the diamond.
  // Can't remove the control flow in this case
  if (lp->outcnt() > 1) return NULL;
  if (rp->outcnt() > 1) return NULL;

  IdealLoopTree* r_loop = get_loop(region);
  assert(r_loop == get_loop(iff), "sanity");
  // Always convert to CMOVE if all results are used only outside this loop.
  bool used_inside_loop = (r_loop == _ltree_root);

  // Check profitability
  int cost = 0;
  int phis = 0;
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    Node *out = region->fast_out(i);
    if (!out->is_Phi()) continue; // Ignore other control edges, etc
    phis++;
    PhiNode* phi = out->as_Phi();
    BasicType bt = phi->type()->basic_type();
    switch (bt) {
    case T_FLOAT:
    case T_DOUBLE: {
      cost += Matcher::float_cmove_cost(); // Could be very expensive
      break;
    }
    case T_LONG: {
      cost += Matcher::long_cmove_cost(); // May encode as 2 CMOVs
    }
    case T_INT:                 // These all CMOV fine
    case T_ADDRESS: {           // (RawPtr)
      cost++;
      break;
    }
    case T_NARROWOOP: // Fall through
    case T_OBJECT: {            // Base oops are OK, but not derived oops
      const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
      // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
      // CMOVE'd derived pointer? It's a CMOVE'd derived base. Thus
      // CMOVE'ing a derived pointer requires we also CMOVE the base. If we
      // have a Phi for the base here that we convert to a CMOVE all is well
      // and good. But if the base is dead, we'll not make a CMOVE. Later
      // the allocator will have to produce a base by creating a CMOVE of the
      // relevant bases. This puts the allocator in the business of
      // manufacturing expensive instructions, generally a bad plan.
      // Just Say No to Conditionally-Moved Derived Pointers.
      if (tp && tp->offset() != 0)
        return NULL;
      cost++;
      break;
    }
    default:
      return NULL;              // In particular, can't do memory or I/O
    }
    // Add in the cost of any speculative ops
    for (uint j = 1; j < region->req(); j++) {
      Node *proj = region->in(j);
      Node *inp = phi->in(j);
      if (get_ctrl(inp) == proj) { // Found local op
        cost++;
        // Check for a chain of dependent ops; these will all become
        // speculative in a CMOV.
        for (uint k = 1; k < inp->req(); k++)
          if (get_ctrl(inp->in(k)) == proj)
            cost += ConditionalMoveLimit; // Too much speculative goo
      }
    }
    // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
    // This will likely Split-If, a higher-payoff operation.
    for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
      Node* use = phi->fast_out(k);
      if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
        cost += ConditionalMoveLimit;
      // Is there a use inside the loop?
      // Note: check only basic types since CMoveP is pinned.
      if (!used_inside_loop && is_java_primitive(bt)) {
        IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
        if (r_loop == u_loop || r_loop->is_member(u_loop)) {
          used_inside_loop = true;
        }
      }
    }
  }
  Node* bol = iff->in(1);
  assert(bol->Opcode() == Op_Bool, "");
  int cmp_op = bol->in(1)->Opcode();
  // It is expensive to generate flags from a float compare.
  // Avoid duplicated float compare.
  if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return NULL;

  float infrequent_prob = PROB_UNLIKELY_MAG(3);
  // Ignore cost and block frequency if CMOVE can be moved outside the loop.
  if (used_inside_loop) {
    if (cost >= ConditionalMoveLimit) return NULL; // Too much goo

    // BlockLayoutByFrequency optimization moves infrequent branch
    // from hot path. No point in CMOV'ing in such case (110 is used
    // instead of 100 to take into account the inexactness of float values).
    if (BlockLayoutByFrequency) {
      infrequent_prob = MAX2(infrequent_prob, (float)BlockLayoutMinDiamondPercentage/110.0f);
    }
  }
  // Check for highly predictable branch. No point in CMOV'ing if
  // we are going to predict accurately all the time.
  if (iff->_prob < infrequent_prob ||
      iff->_prob > (1.0f - infrequent_prob))
    return NULL;

  // --------------
  // Now replace all Phis with CMOV's
  Node *cmov_ctrl = iff->in(0);
  uint flip = (lp->Opcode() == Op_IfTrue);
  Node_List wq;
  while (1) {
    PhiNode* phi = NULL;
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      Node *out = region->fast_out(i);
      if (out->is_Phi()) {
        phi = out->as_Phi();
        break;
      }
    }
    if (phi == NULL) break;
#ifndef PRODUCT
    if (PrintOpto && VerifyLoopOptimizations) tty->print_cr("CMOV");
#endif
    // Move speculative ops
    wq.push(phi);
    while (wq.size() > 0) {
      Node *n = wq.pop();
      for (uint j = 1; j < n->req(); j++) {
        Node* m = n->in(j);
        if (m != NULL && !is_dominator(get_ctrl(m), cmov_ctrl)) {
#ifndef PRODUCT
          if (PrintOpto && VerifyLoopOptimizations) {
            tty->print("  speculate: ");
            m->dump();
          }
#endif
          set_ctrl(m, cmov_ctrl);
          wq.push(m);
        }
      }
    }
    Node *cmov = CMoveNode::make( C, cmov_ctrl, iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi) );
    register_new_node( cmov, cmov_ctrl );
    _igvn.replace_node( phi, cmov );
#ifndef PRODUCT
    if (TraceLoopOpts) {
      tty->print("CMOV  ");
      r_loop->dump_head();
      if (Verbose) {
        bol->in(1)->dump(1);
        cmov->dump(1);
      }
    }
    if (VerifyLoopOptimizations) verify();
#endif
  }

  // The useless CFG diamond will fold up later; see the optimization in
  // RegionNode::Ideal.
  _igvn._worklist.push(region);

  return iff->in(1);
}

//------------------------------split_if_with_blocks_pre-----------------------
// Do the real work in a non-recursive function. Data nodes want to be
// cloned in the pre-order so they can feed each other nicely.
Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
  // Cloning these guys is unlikely to win
  int n_op = n->Opcode();
  if( n_op == Op_MergeMem ) return n;
  if( n->is_Proj() ) return n;
  // Do not clone-up CmpFXXX variations, as these are always
  // followed by a CmpI
  if( n->is_Cmp() ) return n;
  // Attempt to use a conditional move instead of a phi/branch
  if( ConditionalMoveLimit > 0 && n_op == Op_Region ) {
    Node *cmov = conditional_move( n );
    if( cmov ) return cmov;
  }
  if( n->is_CFG() || n->is_LoadStore() )
    return n;
  if( n_op == Op_Opaque1 ||     // Opaque nodes cannot be mod'd
      n_op == Op_Opaque2 ) {
    if( !C->major_progress() )  // If chance of no more loop opts...
      _igvn._worklist.push(n);  // maybe we'll remove them
    return n;
  }

  if( n->is_Con() ) return n;   // No cloning for Con nodes

  Node *n_ctrl = get_ctrl(n);
  if( !n_ctrl ) return n;       // Dead node

  // Attempt to remix address expressions for loop invariants
  Node *m = remix_address_expressions( n );
  if( m ) return m;

  // Determine if the Node has inputs from some local Phi.
  // Returns the block to clone thru.
  Node *n_blk = has_local_phi_input( n );
  if( !n_blk ) return n;
  // Do not clone the trip counter through on a CountedLoop
  // (messes up the canonical shape).
  if( n_blk->is_CountedLoop() && n->Opcode() == Op_AddI ) return n;

  // Check for having no control input; not pinned. Allow
  // dominating control.
  if( n->in(0) ) {
    Node *dom = idom(n_blk);
    if( dom_lca( n->in(0), dom ) != n->in(0) )
      return n;
  }
  // Policy: when is it profitable. You must get more wins than
  // policy before it is considered profitable. Policy is usually 0,
  // so 1 win is considered profitable. Big merges will require big
  // cloning, so get a larger policy.
  int policy = n_blk->req() >> 2;

  // If the loop is a candidate for range check elimination,
  // delay splitting through its phi until a later loop optimization
  if (n_blk->is_CountedLoop()) {
    IdealLoopTree *lp = get_loop(n_blk);
    if (lp && lp->_rce_candidate) {
      return n;
    }
  }

  // Use same limit as split_if_with_blocks_post
  if( C->unique() > 35000 ) return n; // Method too big

  // Split 'n' through the merge point if it is profitable
  Node *phi = split_thru_phi( n, n_blk, policy );
  if (!phi) return n;

  // Found a Phi to split thru!
  // Replace 'n' with the new phi
  _igvn.replace_node( n, phi );
  // Moved a load around the loop, 'en-registering' something.
  if (n_blk->is_Loop() && n->is_Load() &&
      !phi->in(LoopNode::LoopBackControl)->is_Load())
    C->set_major_progress();

  // Moved a barrier around the loop, 'en-registering' something.
  if (n_blk->is_Loop() && n->Opcode() == Op_ShenandoahLoadReferenceBarrier &&
      phi->in(LoopNode::LoopBackControl)->Opcode() != Op_ShenandoahLoadReferenceBarrier)
    C->set_major_progress();

  return phi;
}

static bool merge_point_too_heavy(Compile* C, Node* region) {
  // Bail out if the region and its phis have too many users.
  int weight = 0;
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    weight += region->fast_out(i)->outcnt();
  }
  int nodes_left = C->max_node_limit() - C->live_nodes();
  if (weight * 8 > nodes_left) {
#ifndef PRODUCT
    if (PrintOpto)
      tty->print_cr("*** Split-if bails out: %d nodes, region weight %d", C->unique(), weight);
#endif
    return true;
  } else {
    return false;
  }
}

static bool merge_point_safe(Node* region) {
  // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
  // having a PhiNode input. This sidesteps the dangerous case where the split
  // ConvI2LNode may become TOP if the input Value() does not
  // overlap the ConvI2L range, leaving a node which may not dominate its
  // uses.
  // A better fix for this problem can be found in the BugTraq entry, but
  // expediency for Mantis demands this hack.
  // 6855164: If the merge point has a FastLockNode with a PhiNode input, we stop
  // split_if_with_blocks from splitting a block because we could not move around
  // the FastLockNode.
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    Node* n = region->fast_out(i);
    if (n->is_Phi()) {
      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
        Node* m = n->fast_out(j);
        if (m->is_FastLock())
          return false;
#ifdef _LP64
        if (m->Opcode() == Op_ConvI2L)
          return false;
        if (m->is_CastII() && m->isa_CastII()->has_range_check()) {
          return false;
        }
#endif
      }
    }
  }
  return true;
}


//------------------------------place_near_use---------------------------------
// Place some computation next to use but not inside inner loops.
// For inner loop uses move it to the preheader area.
Node *PhaseIdealLoop::place_near_use( Node *useblock ) const {
  IdealLoopTree *u_loop = get_loop( useblock );
  return (u_loop->_irreducible || u_loop->_child)
    ? useblock
    : u_loop->_head->in(LoopNode::EntryControl);
}


//------------------------------split_if_with_blocks_post----------------------
// Do the real work in a non-recursive function. CFG hackery wants to be
// in the post-order, so it can dirty the I-DOM info and not use the dirtied
// info.
void PhaseIdealLoop::split_if_with_blocks_post( Node *n ) {

  // Cloning Cmp through Phi's involves the split-if transform.
  // FastLock is not used by an If
  if( n->is_Cmp() && !n->is_FastLock() ) {
    if( C->unique() > 35000 ) return; // Method too big

    // Do not do 'split-if' if irreducible loops are present.
    if( _has_irreducible_loops )
      return;

    Node *n_ctrl = get_ctrl(n);
    // Determine if the Node has inputs from some local Phi.
    // Returns the block to clone thru.
    Node *n_blk = has_local_phi_input( n );
    if( n_blk != n_ctrl ) return;

    if( merge_point_too_heavy(C, n_ctrl) )
      return;

    if( n->outcnt() != 1 ) return; // Multiple bool's from 1 compare?
    Node *bol = n->unique_out();
    assert( bol->is_Bool(), "expect a bool here" );
    if( bol->outcnt() != 1 ) return;// Multiple branches from 1 compare?
    Node *iff = bol->unique_out();

    // Check some safety conditions
    if( iff->is_If() ) {        // Classic split-if?
      if( iff->in(0) != n_ctrl ) return; // Compare must be in same blk as if
    } else if (iff->is_CMove()) { // Trying to split-up a CMOVE
      // Can't split CMove with different control edge.
      if (iff->in(0) != NULL && iff->in(0) != n_ctrl ) return;
      if( get_ctrl(iff->in(2)) == n_ctrl ||
          get_ctrl(iff->in(3)) == n_ctrl )
        return;                 // Inputs not yet split-up
      if ( get_loop(n_ctrl) != get_loop(get_ctrl(iff)) ) {
        return;                 // Loop-invar test gates loop-varying CMOVE
      }
    } else {
      return;                   // some other kind of node, such as an Allocate
    }

    // Do not do 'split-if' if some paths are dead. First do dead code
    // elimination and then see if it's still profitable.
    for( uint i = 1; i < n_ctrl->req(); i++ )
      if( n_ctrl->in(i) == C->top() )
        return;

    // When is split-if profitable? Every 'win' means some control flow
    // goes dead, so it's almost always a win.
    int policy = 0;
    // If trying to do a 'Split-If' at the loop head, it is only
    // profitable if the cmp folds up on BOTH paths. Otherwise we
    // risk peeling a loop forever.

    // CNC - Disabled for now. Requires careful handling of loop
    // body selection for the cloned code. Also, make sure we check
    // for any input path not being in the same loop as n_ctrl. For
    // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
    // because the alternative loop entry points won't be converted
    // into LoopNodes.
    IdealLoopTree *n_loop = get_loop(n_ctrl);
    for( uint j = 1; j < n_ctrl->req(); j++ )
      if( get_loop(n_ctrl->in(j)) != n_loop )
        return;

    // Check for safety of the merge point.
    if( !merge_point_safe(n_ctrl) ) {
      return;
    }

    // Split compare 'n' through the merge point if it is profitable
    Node *phi = split_thru_phi( n, n_ctrl, policy );
    if( !phi ) return;

    // Found a Phi to split thru!
    // Replace 'n' with the new phi
    _igvn.replace_node( n, phi );

    // Now split the bool up thru the phi
    Node *bolphi = split_thru_phi( bol, n_ctrl, -1 );
    guarantee(bolphi != NULL, "null boolean phi node");

    _igvn.replace_node( bol, bolphi );
    assert( iff->in(1) == bolphi, "" );

    if( bolphi->Value(&_igvn)->singleton() )
      return;

    // Conditional-move? Must split up now
    if( !iff->is_If() ) {
      Node *cmovphi = split_thru_phi( iff, n_ctrl, -1 );
      _igvn.replace_node( iff, cmovphi );
      return;
    }

    // Now split the IF
    do_split_if( iff );
    return;
  }

  // Check for an IF ready to split; one that has its
  // condition codes input coming from a Phi at the block start.
  int n_op = n->Opcode();

  // Check for an IF being dominated by another IF with the same test
  if (n_op == Op_If) {
    Node *bol = n->in(1);
    uint max = bol->outcnt();
    // Check for same test used more than once?
    if (max > 1 && bol->is_Bool()) {
      // Search up IDOMs to see if this IF is dominated.
      Node *cutoff = get_ctrl(bol);

      // Now search up IDOMs till cutoff, looking for a dominating test
      Node *prevdom = n;
      Node *dom = idom(prevdom);
      while (dom != cutoff) {
        if (dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom) {
          // Replace the dominated test with an obvious true or false.
          // Place it on the IGVN worklist for later cleanup.
          C->set_major_progress();
          dominated_by(prevdom, n, false, true);
#ifndef PRODUCT
          if( VerifyLoopOptimizations ) verify();
#endif
          return;
        }
        prevdom = dom;
        dom = idom(prevdom);
      }
    }
  }

  // See if a shared loop-varying computation has no loop-varying uses.
  // Happens if something is only used for JVM state in uncommon trap exits,
  // like various versions of induction variable+offset. Clone the
  // computation per usage to allow it to sink out of the loop.
  if (has_ctrl(n) && !n->in(0)) { // n not dead and has no control edge (can float about)
    Node *n_ctrl = get_ctrl(n);
    IdealLoopTree *n_loop = get_loop(n_ctrl);
    if( n_loop != _ltree_root ) {
      DUIterator_Fast imax, i = n->fast_outs(imax);
      for (; i < imax; i++) {
        Node* u = n->fast_out(i);
        if( !has_ctrl(u) ) break; // Found control user
        IdealLoopTree *u_loop = get_loop(get_ctrl(u));
        if( u_loop == n_loop ) break; // Found loop-varying use
        if( n_loop->is_member( u_loop ) ) break; // Found use in inner loop
        if( u->Opcode() == Op_Opaque1 ) break; // Found loop limit, bugfix for 4677003
      }
      bool did_break = (i < imax);  // Did we break out of the previous loop?
      if (!did_break && n->outcnt() > 1) { // All uses in outer loops!
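        // Clone 'n' once per use below: each private copy is placed near its
        // use (see place_near_use) so it can sink out of the loop, and the
        // original shared node is removed afterwards.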
        Node *late_load_ctrl = NULL;
        if (n->is_Load()) {
          // If n is a load, get and save the result from get_late_ctrl(),
          // to be later used in calculating the control for n's clones.
          clear_dom_lca_tags();
          late_load_ctrl = get_late_ctrl(n, n_ctrl);
        }
        // If n is a load, and the late control is the same as the current
        // control, then the cloning of n is a pointless exercise, because
        // GVN will ensure that we end up where we started.
        if (!n->is_Load() || late_load_ctrl != n_ctrl) {
          for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; ) {
            Node *u = n->last_out(j); // Clone private computation per use
            _igvn.rehash_node_delayed(u);
            Node *x = n->clone(); // Clone computation
            Node *x_ctrl = NULL;
            if( u->is_Phi() ) {
              // Replace all uses of normal nodes. Replace Phi uses
              // individually, so the separate Nodes can sink down
              // different paths.
              uint k = 1;
              while( u->in(k) != n ) k++;
              u->set_req( k, x );
              // x goes next to Phi input path
              x_ctrl = u->in(0)->in(k);
              --j;
            } else {              // Normal use
              // Replace all uses
              for( uint k = 0; k < u->req(); k++ ) {
                if( u->in(k) == n ) {
                  u->set_req( k, x );
                  --j;
                }
              }
              x_ctrl = get_ctrl(u);
            }

            // Find control for 'x' next to use but not inside inner loops.
            // For inner loop uses get the preheader area.
            x_ctrl = place_near_use(x_ctrl);

            if (n->is_Load()) {
              // For loads, add a control edge to a CFG node outside of the loop
              // to force them to not combine and return back inside the loop
              // during GVN optimization (4641526).
              //
              // Because we are setting the actual control input, factor in
              // the result from get_late_ctrl() so we respect any
              // anti-dependences. (6233005).
              x_ctrl = dom_lca(late_load_ctrl, x_ctrl);

              // Don't allow the control input to be a CFG splitting node.
              // Such nodes should only have ProjNodes as outs, e.g. IfNode
              // should only have IfTrueNode and IfFalseNode (4985384).
              x_ctrl = find_non_split_ctrl(x_ctrl);
              assert(dom_depth(n_ctrl) <= dom_depth(x_ctrl), "n is later than its clone");

              x->set_req(0, x_ctrl);
            }
            register_new_node(x, x_ctrl);

            // Some institutional knowledge is needed here: 'x' is
            // yanked because if the optimizer runs GVN on it all the
            // cloned x's will common up and undo this optimization and
            // be forced back in the loop. This is annoying because it
            // makes +VerifyOpto report false-positives on progress. I
            // tried setting control edges on the x's to force them to
            // not combine, but the matching gets worried when it tries
            // to fold a StoreP and an AddP together (as part of an
            // address expression) and the AddP and StoreP have
            // different controls.
            if (!x->is_Load() && !x->is_DecodeNarrowPtr()) _igvn._worklist.yank(x);
          }
          _igvn.remove_dead_node(n);
        }
      }
    }
  }

  // Check for Opaque2's whose loop has disappeared - whose input is in the
  // same loop nest as their output. Remove 'em, they are no longer useful.
  if( n_op == Op_Opaque2 &&
      n->in(1) != NULL &&
      get_loop(get_ctrl(n)) == get_loop(get_ctrl(n->in(1))) ) {
    _igvn.replace_node( n, n->in(1) );
  }
}

//------------------------------split_if_with_blocks---------------------------
// Check for aggressive application of 'split-if' optimization,
// using basic block level info.
void PhaseIdealLoop::split_if_with_blocks( VectorSet &visited, Node_Stack &nstack ) {
  Node *n = C->root();
  visited.set(n->_idx); // first, mark node as visited
  // Do pre-visit work for root
  n = split_if_with_blocks_pre( n );
  uint cnt = n->outcnt();
  uint i   = 0;
  while (true) {
    // Visit all children
    if (i < cnt) {
      Node* use = n->raw_out(i);
      ++i;
      if (use->outcnt() != 0 && !visited.test_set(use->_idx)) {
        // Now do pre-visit work for this use
        use = split_if_with_blocks_pre( use );
        nstack.push(n, i); // Save parent and next use's index.
        n   = use;         // Process all children of current use.
        cnt = use->outcnt();
        i   = 0;
      }
    }
    else {
      // All of n's children have been processed, complete post-processing.
      if (cnt != 0 && !n->is_Con()) {
        assert(has_node(n), "no dead nodes");
        split_if_with_blocks_post( n );
      }
      if (nstack.is_empty()) {
        // Finished all nodes on stack.
        break;
      }
      // Get saved parent node and next use's index. Visit the rest of uses.
      n   = nstack.node();
      cnt = n->outcnt();
      i   = nstack.index();
      nstack.pop();
    }
  }
}


//=============================================================================
//
//                   C L O N E   A   L O O P   B O D Y
//

//------------------------------clone_iff--------------------------------------
// Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
// "Nearly" because all Nodes have been cloned from the original in the loop,
// but the fall-in edges to the Cmp are different. Clone bool/Cmp pairs
// through the Phi recursively, and return a Bool.
BoolNode *PhaseIdealLoop::clone_iff( PhiNode *phi, IdealLoopTree *loop ) {

  // Convert this Phi into a Phi merging Bools
  uint i;
  for( i = 1; i < phi->req(); i++ ) {
    Node *b = phi->in(i);
    if( b->is_Phi() ) {
      _igvn.replace_input_of(phi, i, clone_iff( b->as_Phi(), loop ));
    } else {
      assert( b->is_Bool(), "" );
    }
  }

  Node *sample_bool = phi->in(1);
  Node *sample_cmp  = sample_bool->in(1);

  // Make Phis to merge the Cmp's inputs.
  PhiNode *phi1 = new (C) PhiNode( phi->in(0), Type::TOP );
  PhiNode *phi2 = new (C) PhiNode( phi->in(0), Type::TOP );
  for( i = 1; i < phi->req(); i++ ) {
    Node *n1 = phi->in(i)->in(1)->in(1);
    Node *n2 = phi->in(i)->in(1)->in(2);
    phi1->set_req( i, n1 );
    phi2->set_req( i, n2 );
    phi1->set_type( phi1->type()->meet_speculative(n1->bottom_type()));
    phi2->set_type( phi2->type()->meet_speculative(n2->bottom_type()));
  }
  // See if these Phis have been made before.
  // Register with optimizer
  Node *hit1 = _igvn.hash_find_insert(phi1);
  if( hit1 ) {                  // Hit, toss just made Phi
    _igvn.remove_dead_node(phi1); // Remove new phi
    assert( hit1->is_Phi(), "" );
    phi1 = (PhiNode*)hit1;      // Use existing phi
  } else {                      // Miss
    _igvn.register_new_node_with_optimizer(phi1);
  }
  Node *hit2 = _igvn.hash_find_insert(phi2);
  if( hit2 ) {                  // Hit, toss just made Phi
    _igvn.remove_dead_node(phi2); // Remove new phi
    assert( hit2->is_Phi(), "" );
    phi2 = (PhiNode*)hit2;      // Use existing phi
  } else {                      // Miss
    _igvn.register_new_node_with_optimizer(phi2);
  }
  // Register Phis with loop/block info
  set_ctrl(phi1, phi->in(0));
  set_ctrl(phi2, phi->in(0));
  // Make a new Cmp
  Node *cmp = sample_cmp->clone();
  cmp->set_req( 1, phi1 );
  cmp->set_req( 2, phi2 );
  _igvn.register_new_node_with_optimizer(cmp);
  set_ctrl(cmp, phi->in(0));

  // Make a new Bool
  Node *b = sample_bool->clone();
  b->set_req(1,cmp);
  _igvn.register_new_node_with_optimizer(b);
  set_ctrl(b, phi->in(0));

  assert( b->is_Bool(), "" );
  return (BoolNode*)b;
}

//------------------------------clone_bool-------------------------------------
// Passed in a Phi merging (recursively) some nearly equivalent Cmps.
// "Nearly" because all Nodes have been cloned from the original in the loop,
// but the fall-in edges to the Cmp are different. Clone Cmps
// through the Phi recursively, and return a Cmp.
CmpNode *PhaseIdealLoop::clone_bool( PhiNode *phi, IdealLoopTree *loop ) {
  uint i;
  // Convert this Phi into a Phi merging Cmps
  for( i = 1; i < phi->req(); i++ ) {
    Node *b = phi->in(i);
    if( b->is_Phi() ) {
      _igvn.replace_input_of(phi, i, clone_bool( b->as_Phi(), loop ));
    } else {
      assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" );
    }
  }

  Node *sample_cmp = phi->in(1);

  // Make Phis to merge the Cmp's inputs.
  PhiNode *phi1 = new (C) PhiNode( phi->in(0), Type::TOP );
  PhiNode *phi2 = new (C) PhiNode( phi->in(0), Type::TOP );
  for( uint j = 1; j < phi->req(); j++ ) {
    Node *cmp_top = phi->in(j); // Inputs are all Cmp or TOP
    Node *n1, *n2;
    if( cmp_top->is_Cmp() ) {
      n1 = cmp_top->in(1);
      n2 = cmp_top->in(2);
    } else {
      n1 = n2 = cmp_top;
    }
    phi1->set_req( j, n1 );
    phi2->set_req( j, n2 );
    phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
    phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
  }

  // See if these Phis have been made before.
  // Register with optimizer
  Node *hit1 = _igvn.hash_find_insert(phi1);
  if( hit1 ) {                  // Hit, toss just made Phi
    _igvn.remove_dead_node(phi1); // Remove new phi
    assert( hit1->is_Phi(), "" );
    phi1 = (PhiNode*)hit1;      // Use existing phi
  } else {                      // Miss
    _igvn.register_new_node_with_optimizer(phi1);
  }
  Node *hit2 = _igvn.hash_find_insert(phi2);
  if( hit2 ) {                  // Hit, toss just made Phi
    _igvn.remove_dead_node(phi2); // Remove new phi
    assert( hit2->is_Phi(), "" );
    phi2 = (PhiNode*)hit2;      // Use existing phi
  } else {                      // Miss
    _igvn.register_new_node_with_optimizer(phi2);
  }
  // Register Phis with loop/block info
  set_ctrl(phi1, phi->in(0));
  set_ctrl(phi2, phi->in(0));
  // Make a new Cmp
  Node *cmp = sample_cmp->clone();
  cmp->set_req( 1, phi1 );
  cmp->set_req( 2, phi2 );
  _igvn.register_new_node_with_optimizer(cmp);
  set_ctrl(cmp, phi->in(0));

  assert( cmp->is_Cmp(), "" );
  return (CmpNode*)cmp;
}

//------------------------------sink_use---------------------------------------
// If 'use' was in the loop-exit block, it now needs to be sunk
// below the post-loop merge point.
void PhaseIdealLoop::sink_use( Node *use, Node *post_loop ) {
  if (!use->is_CFG() && get_ctrl(use) == post_loop->in(2)) {
    set_ctrl(use, post_loop);
    for (DUIterator j = use->outs(); use->has_out(j); j++)
      sink_use(use->out(j), post_loop);
  }
}

//------------------------------clone_loop-------------------------------------
//
//                   C L O N E   A   L O O P   B O D Y
//
// This is the basic building block of the loop optimizations. It clones an
// entire loop body. It makes an old_new loop body mapping; with this mapping
// you can find the new-loop equivalent to an old-loop node. All new-loop
// nodes are exactly equal to their old-loop counterparts, all edges are the
// same. All exits from the old-loop now have a RegionNode that merges the
// equivalent new-loop path. This is true even for the normal "loop-exit"
// condition. All uses of loop-invariant old-loop values now come from (one
// or more) Phis that merge their new-loop equivalents.
//
// This operation leaves the graph in an illegal state: there are two valid
// control edges coming from the loop pre-header to both loop bodies. I'll
// definitely have to hack the graph after running this transform.
//
// From this building block I will further edit edges to perform loop peeling
// or loop unrolling or iteration splitting (Range-Check-Elimination), etc.
//
// Parameter side_by_side_idom:
//   When side_by_side_idom is NULL, the dominator tree is constructed for
//      the clone loop to dominate the original. Used in construction of
//      pre-main-post loop sequence.
//   When nonnull, the clone and original are side-by-side, both are
//      dominated by the side_by_side_idom node. Used in construction of
//      unswitched loops.
void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd,
                                 Node* side_by_side_idom) {

  // Step 1: Clone the loop body. Make the old->new mapping.
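  // (The mapping is keyed by the old node's _idx, so old_new[old->_idx]
  //  yields the clone; Step 3 below re-maps old exit projections to the
  //  Region that merges the old and new exits.)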
  uint i;
  for( i = 0; i < loop->_body.size(); i++ ) {
    Node *old = loop->_body.at(i);
    Node *nnn = old->clone();
    old_new.map( old->_idx, nnn );
    _igvn.register_new_node_with_optimizer(nnn);
  }


  // Step 2: Fix the edges in the new body. If the old input is outside the
  // loop use it. If the old input is INside the loop, use the corresponding
  // new node instead.
  for( i = 0; i < loop->_body.size(); i++ ) {
    Node *old = loop->_body.at(i);
    Node *nnn = old_new[old->_idx];
    // Fix CFG/Loop controlling the new node
    if (has_ctrl(old)) {
      set_ctrl(nnn, old_new[get_ctrl(old)->_idx]);
    } else {
      set_loop(nnn, loop->_parent);
      if (old->outcnt() > 0) {
        set_idom( nnn, old_new[idom(old)->_idx], dd );
      }
    }
    // Correct edges to the new node
    for( uint j = 0; j < nnn->req(); j++ ) {
      Node *n = nnn->in(j);
      if( n ) {
        IdealLoopTree *old_in_loop = get_loop( has_ctrl(n) ? get_ctrl(n) : n );
        if( loop->is_member( old_in_loop ) )
          nnn->set_req(j, old_new[n->_idx]);
      }
    }
    _igvn.hash_find_insert(nnn);
  }
  Node *newhead = old_new[loop->_head->_idx];
  set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);


  // Step 3: Now fix control uses. Loop varying control uses have already
  // been fixed up (as part of all input edges in Step 2). Loop invariant
  // control uses must be either an IfFalse or an IfTrue. Make a merge
  // point to merge the old and new IfFalse/IfTrue nodes; make the use
  // refer to this.
  ResourceArea *area = Thread::current()->resource_area();
  Node_List worklist(area);
  uint new_counter = C->unique();
  for( i = 0; i < loop->_body.size(); i++ ) {
    Node* old = loop->_body.at(i);
    if( !old->is_CFG() ) continue;
    Node* nnn = old_new[old->_idx];

    // Copy uses to a worklist, so I can munge the def-use info
    // with impunity.
    for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
      worklist.push(old->fast_out(j));

    while( worklist.size() ) {  // Visit all uses
      Node *use = worklist.pop();
      if (!has_node(use)) continue; // Ignore dead nodes
      IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
      if( !loop->is_member( use_loop ) && use->is_CFG() ) {
        // Both OLD and USE are CFG nodes here.
        assert( use->is_Proj(), "" );

        // Clone the loop exit control projection
        Node *newuse = use->clone();
        newuse->set_req(0,nnn);
        _igvn.register_new_node_with_optimizer(newuse);
        set_loop(newuse, use_loop);
        set_idom(newuse, nnn, dom_depth(nnn) + 1 );

        // We need a Region to merge the exit from the peeled body and the
        // exit from the old loop body.
        RegionNode *r = new (C) RegionNode(3);
        // Map the old use to the new merge point
        old_new.map( use->_idx, r );
        uint dd_r = MIN2(dom_depth(newuse),dom_depth(use));
        assert( dd_r >= dom_depth(dom_lca(newuse,use)), "" );

        // The original user of 'use' uses 'r' instead.
        for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) {
          Node* useuse = use->last_out(l);
          _igvn.rehash_node_delayed(useuse);
          uint uses_found = 0;
          if( useuse->in(0) == use ) {
            useuse->set_req(0, r);
            uses_found++;
            if( useuse->is_CFG() ) {
              assert( dom_depth(useuse) > dd_r, "" );
              set_idom(useuse, r, dom_depth(useuse));
            }
          }
          for( uint k = 1; k < useuse->req(); k++ ) {
            if( useuse->in(k) == use ) {
              useuse->set_req(k, r);
              uses_found++;
            }
          }
          l -= uses_found;    // we deleted 1 or more copies of this edge
        }

        // Now finish up 'r'
        r->set_req( 1, newuse );
        r->set_req( 2,    use );
        _igvn.register_new_node_with_optimizer(r);
        set_loop(r, use_loop);
        set_idom(r, !side_by_side_idom ? newuse->in(0) : side_by_side_idom, dd_r);
      } // End of if a loop-exit test
    }
  }

  // Step 4: If loop-invariant use is not control, it must be dominated by a
  // loop exit IfFalse/IfTrue. Find "proper" loop exit. Make a Region
  // there if needed. Make a Phi there merging old and new used values.
  Node_List *split_if_set = NULL;
  Node_List *split_bool_set = NULL;
  Node_List *split_cex_set = NULL;
  for( i = 0; i < loop->_body.size(); i++ ) {
    Node* old = loop->_body.at(i);
    Node* nnn = old_new[old->_idx];
    // Copy uses to a worklist, so I can munge the def-use info
    // with impunity.
    for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
      worklist.push(old->fast_out(j));

    while( worklist.size() ) {
      Node *use = worklist.pop();
      if (!has_node(use)) continue; // Ignore dead nodes
      if (use->in(0) == C->top()) continue;
      IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
      // Check for data-use outside of loop - at least one of OLD or USE
      // must not be a CFG node.
      if( !loop->is_member( use_loop ) && (!old->is_CFG() || !use->is_CFG())) {

        // If the Data use is an IF, that means we have an IF outside of the
        // loop that is switching on a condition that is set inside of the
        // loop. Happens if people set a loop-exit flag; then test the flag
        // in the loop to break the loop, then test it again outside of the
        // loop to determine which way the loop exited.
        // Loop predicate If node connects to Bool node through Opaque1 node.
        if (use->is_If() || use->is_CMove() || C->is_predicate_opaq(use)) {
          // Since this code is highly unlikely, we lazily build the worklist
          // of such Nodes to go split.
          if( !split_if_set )
            split_if_set = new Node_List(area);
          split_if_set->push(use);
        }
        if( use->is_Bool() ) {
          if( !split_bool_set )
            split_bool_set = new Node_List(area);
          split_bool_set->push(use);
        }
        if( use->Opcode() == Op_CreateEx ) {
          if( !split_cex_set )
            split_cex_set = new Node_List(area);
          split_cex_set->push(use);
        }


        // Get "block" use is in
        uint idx = 0;
        while( use->in(idx) != old ) idx++;
        Node *prev = use->is_CFG() ? use : get_ctrl(use);
        assert( !loop->is_member( get_loop( prev ) ), "" );
        Node *cfg = prev->_idx >= new_counter
          ? prev->in(2)
          : idom(prev);
        if( use->is_Phi() )     // Phi use is in prior block
          cfg = prev->in(idx);  // NOT in block of Phi itself
        if (cfg->is_top()) {    // Use is dead?
          _igvn.replace_input_of(use, idx, C->top());
          continue;
        }

        while( !loop->is_member( get_loop( cfg ) ) ) {
          prev = cfg;
          cfg = cfg->_idx >= new_counter ? cfg->in(2) : idom(cfg);
        }
        // If the use occurs after merging several exits from the loop, then
        // old value must have dominated all those exits. Since the same old
        // value was used on all those exits we did not need a Phi at this
        // merge point. NOW we do need a Phi here. Each loop exit value
        // is now merged with the peeled body exit; each exit gets its own
        // private Phi and those Phis need to be merged here.
        Node *phi;
        if( prev->is_Region() ) {
          if( idx == 0 ) {      // Updating control edge?
            phi = prev;         // Just use existing control
          } else {              // Else need a new Phi
            phi = PhiNode::make( prev, old );
            // Now recursively fix up the new uses of old!
            for( uint i = 1; i < prev->req(); i++ ) {
              worklist.push(phi); // Onto worklist once for each 'old' input
            }
          }
        } else {
          // Get new RegionNode merging old and new loop exits
          prev = old_new[prev->_idx];
          assert( prev, "just made this in step 7" );
          if( idx == 0 ) {      // Updating control edge?
            phi = prev;         // Just use existing control
          } else {              // Else need a new Phi
            // Make a new Phi merging data values properly
            phi = PhiNode::make( prev, old );
            phi->set_req( 1, nnn );
          }
        }
        // If inserting a new Phi, check for prior hits
        if( idx != 0 ) {
          Node *hit = _igvn.hash_find_insert(phi);
          if( hit == NULL ) {
            _igvn.register_new_node_with_optimizer(phi); // Register new phi
          } else {                                       // or
            // Remove the new phi from the graph and use the hit
            _igvn.remove_dead_node(phi);
            phi = hit;          // Use existing phi
          }
          set_ctrl(phi, prev);
        }
        // Make 'use' use the Phi instead of the old loop body exit value
        _igvn.replace_input_of(use, idx, phi);
        if( use->_idx >= new_counter ) { // If updating new phis
          // Not needed for correctness, but prevents a weak assert
          // in AddPNode from tripping (when we end up with different
          // base & derived Phis that will become the same after
          // IGVN does CSE).
          Node *hit = _igvn.hash_find_insert(use);
          if( hit )             // Go ahead and re-hash for hits.
            _igvn.replace_node( use, hit );
        }

        // If 'use' was in the loop-exit block, it now needs to be sunk
        // below the post-loop merge point.
        sink_use( use, prev );
      }
    }
  }

  // Check for IFs that need splitting/cloning. Happens if an IF outside of
  // the loop uses a condition set in the loop. The original IF probably
  // takes control from one or more OLD Regions (which in turn get from NEW
  // Regions). In any case, there will be a set of Phis for each merge point
  // from the IF up to where the original BOOL def exits the loop.
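  // (clone_iff() rebuilds such a Phi-of-Bools as one new Bool over a Cmp
  //  whose operands are Phis of the original Cmp inputs; clone_bool() does
  //  the same one level down for a Phi merging Cmps.)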
1543 if( split_if_set ) { 1544 while( split_if_set->size() ) { 1545 Node *iff = split_if_set->pop(); 1546 if( iff->in(1)->is_Phi() ) { 1547 BoolNode *b = clone_iff( iff->in(1)->as_Phi(), loop ); 1548 _igvn.replace_input_of(iff, 1, b); 1549 } 1550 } 1551 } 1552 if( split_bool_set ) { 1553 while( split_bool_set->size() ) { 1554 Node *b = split_bool_set->pop(); 1555 Node *phi = b->in(1); 1556 assert( phi->is_Phi(), "" ); 1557 CmpNode *cmp = clone_bool( (PhiNode*)phi, loop ); 1558 _igvn.replace_input_of(b, 1, cmp); 1559 } 1560 } 1561 if( split_cex_set ) { 1562 while( split_cex_set->size() ) { 1563 Node *b = split_cex_set->pop(); 1564 assert( b->in(0)->is_Region(), "" ); 1565 assert( b->in(1)->is_Phi(), "" ); 1566 assert( b->in(0)->in(0) == b->in(1)->in(0), "" ); 1567 split_up( b, b->in(0), NULL ); 1568 } 1569 } 1570 1571 } 1572 1573 1574 //---------------------- stride_of_possible_iv ------------------------------------- 1575 // Looks for an iff/bool/comp with one operand of the compare 1576 // being a cycle involving an add and a phi, 1577 // with an optional truncation (left-shift followed by a right-shift) 1578 // of the add. Returns zero if not an iv. 1579 int PhaseIdealLoop::stride_of_possible_iv(Node* iff) { 1580 Node* trunc1 = NULL; 1581 Node* trunc2 = NULL; 1582 const TypeInt* ttype = NULL; 1583 if (!iff->is_If() || iff->in(1) == NULL || !iff->in(1)->is_Bool()) { 1584 return 0; 1585 } 1586 BoolNode* bl = iff->in(1)->as_Bool(); 1587 Node* cmp = bl->in(1); 1588 if (!cmp || cmp->Opcode() != Op_CmpI && cmp->Opcode() != Op_CmpU) { 1589 return 0; 1590 } 1591 // Must have an invariant operand 1592 if (is_member(get_loop(iff), get_ctrl(cmp->in(2)))) { 1593 return 0; 1594 } 1595 Node* add2 = NULL; 1596 Node* cmp1 = cmp->in(1); 1597 if (cmp1->is_Phi()) { 1598 // (If (Bool (CmpX phi:(Phi ...(Optional-trunc(AddI phi add2))) ))) 1599 Node* phi = cmp1; 1600 for (uint i = 1; i < phi->req(); i++) { 1601 Node* in = phi->in(i); 1602 Node* add = CountedLoopNode::match_incr_with_optional_truncation(in, 1603 &trunc1, &trunc2, &ttype); 1604 if (add && add->in(1) == phi) { 1605 add2 = add->in(2); 1606 break; 1607 } 1608 } 1609 } else { 1610 // (If (Bool (CmpX addtrunc:(Optional-trunc((AddI (Phi ...addtrunc...) add2)) ))) 1611 Node* addtrunc = cmp1; 1612 Node* add = CountedLoopNode::match_incr_with_optional_truncation(addtrunc, 1613 &trunc1, &trunc2, &ttype); 1614 if (add && add->in(1)->is_Phi()) { 1615 Node* phi = add->in(1); 1616 for (uint i = 1; i < phi->req(); i++) { 1617 if (phi->in(i) == addtrunc) { 1618 add2 = add->in(2); 1619 break; 1620 } 1621 } 1622 } 1623 } 1624 if (add2 != NULL) { 1625 const TypeInt* add2t = _igvn.type(add2)->is_int(); 1626 if (add2t->is_con()) { 1627 return add2t->get_con(); 1628 } 1629 } 1630 return 0; 1631 } 1632 1633 1634 //---------------------- stay_in_loop ------------------------------------- 1635 // Return the (unique) control output node that's in the loop (if it exists.) 
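// Returns NULL if there is no such output, or if it is not unique.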
1636 Node* PhaseIdealLoop::stay_in_loop( Node* n, IdealLoopTree *loop) { 1637 Node* unique = NULL; 1638 if (!n) return NULL; 1639 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1640 Node* use = n->fast_out(i); 1641 if (!has_ctrl(use) && loop->is_member(get_loop(use))) { 1642 if (unique != NULL) { 1643 return NULL; 1644 } 1645 unique = use; 1646 } 1647 } 1648 return unique; 1649 } 1650 1651 //------------------------------ register_node ------------------------------------- 1652 // Utility to register node "n" with PhaseIdealLoop 1653 void PhaseIdealLoop::register_node(Node* n, IdealLoopTree *loop, Node* pred, int ddepth) { 1654 _igvn.register_new_node_with_optimizer(n); 1655 loop->_body.push(n); 1656 if (n->is_CFG()) { 1657 set_loop(n, loop); 1658 set_idom(n, pred, ddepth); 1659 } else { 1660 set_ctrl(n, pred); 1661 } 1662 } 1663 1664 //------------------------------ proj_clone ------------------------------------- 1665 // Utility to create an if-projection 1666 ProjNode* PhaseIdealLoop::proj_clone(ProjNode* p, IfNode* iff) { 1667 ProjNode* c = p->clone()->as_Proj(); 1668 c->set_req(0, iff); 1669 return c; 1670 } 1671 1672 //------------------------------ short_circuit_if ------------------------------------- 1673 // Force the iff control output to be the live_proj 1674 Node* PhaseIdealLoop::short_circuit_if(IfNode* iff, ProjNode* live_proj) { 1675 guarantee(live_proj != NULL, "null projection"); 1676 int proj_con = live_proj->_con; 1677 assert(proj_con == 0 || proj_con == 1, "false or true projection"); 1678 Node *con = _igvn.intcon(proj_con); 1679 set_ctrl(con, C->root()); 1680 if (iff) { 1681 iff->set_req(1, con); 1682 } 1683 return con; 1684 } 1685 1686 //------------------------------ insert_if_before_proj ------------------------------------- 1687 // Insert a new if before an if projection (* - new node) 1688 // 1689 // before 1690 // if(test) 1691 // / \ 1692 // v v 1693 // other-proj proj (arg) 1694 // 1695 // after 1696 // if(test) 1697 // / \ 1698 // / v 1699 // | * proj-clone 1700 // v | 1701 // other-proj v 1702 // * new_if(relop(cmp[IU](left,right))) 1703 // / \ 1704 // v v 1705 // * new-proj proj 1706 // (returned) 1707 // 1708 ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj) { 1709 IfNode* iff = proj->in(0)->as_If(); 1710 IdealLoopTree *loop = get_loop(proj); 1711 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj(); 1712 int ddepth = dom_depth(proj); 1713 1714 _igvn.rehash_node_delayed(iff); 1715 _igvn.rehash_node_delayed(proj); 1716 1717 proj->set_req(0, NULL); // temporary disconnect 1718 ProjNode* proj2 = proj_clone(proj, iff); 1719 register_node(proj2, loop, iff, ddepth); 1720 1721 Node* cmp = Signed ? 
(Node*) new (C)CmpINode(left, right) : (Node*) new (C)CmpUNode(left, right); 1722 register_node(cmp, loop, proj2, ddepth); 1723 1724 BoolNode* bol = new (C)BoolNode(cmp, relop); 1725 register_node(bol, loop, proj2, ddepth); 1726 1727 IfNode* new_if = new (C)IfNode(proj2, bol, iff->_prob, iff->_fcnt); 1728 register_node(new_if, loop, proj2, ddepth); 1729 1730 proj->set_req(0, new_if); // reattach 1731 set_idom(proj, new_if, ddepth); 1732 1733 ProjNode* new_exit = proj_clone(other_proj, new_if)->as_Proj(); 1734 guarantee(new_exit != NULL, "null exit node"); 1735 register_node(new_exit, get_loop(other_proj), new_if, ddepth); 1736 1737 return new_exit; 1738 } 1739 1740 //------------------------------ insert_region_before_proj ------------------------------------- 1741 // Insert a region before an if projection (* - new node) 1742 // 1743 // before 1744 // if(test) 1745 // / | 1746 // v | 1747 // proj v 1748 // other-proj 1749 // 1750 // after 1751 // if(test) 1752 // / | 1753 // v | 1754 // * proj-clone v 1755 // | other-proj 1756 // v 1757 // * new-region 1758 // | 1759 // v 1760 // * dum_if 1761 // / \ 1762 // v \ 1763 // * dum-proj v 1764 // proj 1765 // 1766 RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) { 1767 IfNode* iff = proj->in(0)->as_If(); 1768 IdealLoopTree *loop = get_loop(proj); 1769 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj(); 1770 int ddepth = dom_depth(proj); 1771 1772 _igvn.rehash_node_delayed(iff); 1773 _igvn.rehash_node_delayed(proj); 1774 1775 proj->set_req(0, NULL); // temporary disconnect 1776 ProjNode* proj2 = proj_clone(proj, iff); 1777 register_node(proj2, loop, iff, ddepth); 1778 1779 RegionNode* reg = new (C)RegionNode(2); 1780 reg->set_req(1, proj2); 1781 register_node(reg, loop, iff, ddepth); 1782 1783 IfNode* dum_if = new (C)IfNode(reg, short_circuit_if(NULL, proj), iff->_prob, iff->_fcnt); 1784 register_node(dum_if, loop, reg, ddepth); 1785 1786 proj->set_req(0, dum_if); // reattach 1787 set_idom(proj, dum_if, ddepth); 1788 1789 ProjNode* dum_proj = proj_clone(other_proj, dum_if); 1790 register_node(dum_proj, loop, dum_if, ddepth); 1791 1792 return reg; 1793 } 1794 1795 //------------------------------ insert_cmpi_loop_exit ------------------------------------- 1796 // Clone a signed compare loop exit from an unsigned compare and 1797 // insert it before the unsigned cmp on the stay-in-loop path. 1798 // All new nodes inserted in the dominator tree between the original 1799 // if and it's projections. The original if test is replaced with 1800 // a constant to force the stay-in-loop path. 1801 // 1802 // This is done to make sure that the original if and it's projections 1803 // still dominate the same set of control nodes, that the ctrl() relation 1804 // from data nodes to them is preserved, and that their loop nesting is 1805 // preserved. 
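//
// Why the signed copy: partial_peel() prefers a signed (CmpI) exit test as its
// cut point ("Prefer signed compare over unsigned compare" below), so when the
// only candidate exit uses an unsigned CmpU, an equivalent signed exit test is
// materialized here for it to use.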
1806 // 1807 // before 1808 // if(i <u limit) unsigned compare loop exit 1809 // / | 1810 // v v 1811 // exit-proj stay-in-loop-proj 1812 // 1813 // after 1814 // if(stay-in-loop-const) original if 1815 // / | 1816 // / v 1817 // / if(i < limit) new signed test 1818 // / / | 1819 // / / v 1820 // / / if(i <u limit) new cloned unsigned test 1821 // / / / | 1822 // v v v | 1823 // region | 1824 // | | 1825 // dum-if | 1826 // / | | 1827 // ether | | 1828 // v v 1829 // exit-proj stay-in-loop-proj 1830 // 1831 IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop) { 1832 const bool Signed = true; 1833 const bool Unsigned = false; 1834 1835 BoolNode* bol = if_cmpu->in(1)->as_Bool(); 1836 if (bol->_test._test != BoolTest::lt) return NULL; 1837 CmpNode* cmpu = bol->in(1)->as_Cmp(); 1838 if (cmpu->Opcode() != Op_CmpU) return NULL; 1839 int stride = stride_of_possible_iv(if_cmpu); 1840 if (stride == 0) return NULL; 1841 1842 Node* lp_proj = stay_in_loop(if_cmpu, loop); 1843 guarantee(lp_proj != NULL, "null loop node"); 1844 1845 ProjNode* lp_continue = lp_proj->as_Proj(); 1846 ProjNode* lp_exit = if_cmpu->proj_out(!lp_continue->is_IfTrue())->as_Proj(); 1847 1848 Node* limit = NULL; 1849 if (stride > 0) { 1850 limit = cmpu->in(2); 1851 } else { 1852 limit = _igvn.makecon(TypeInt::ZERO); 1853 set_ctrl(limit, C->root()); 1854 } 1855 // Create a new region on the exit path 1856 RegionNode* reg = insert_region_before_proj(lp_exit); 1857 guarantee(reg != NULL, "null region node"); 1858 1859 // Clone the if-cmpu-true-false using a signed compare 1860 BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge; 1861 ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, limit, lp_continue); 1862 reg->add_req(cmpi_exit); 1863 1864 // Clone the if-cmpu-true-false 1865 BoolTest::mask rel_u = bol->_test._test; 1866 ProjNode* cmpu_exit = insert_if_before_proj(cmpu->in(1), Unsigned, rel_u, cmpu->in(2), lp_continue); 1867 reg->add_req(cmpu_exit); 1868 1869 // Force original if to stay in loop. 1870 short_circuit_if(if_cmpu, lp_continue); 1871 1872 return cmpi_exit->in(0)->as_If(); 1873 } 1874 1875 //------------------------------ remove_cmpi_loop_exit ------------------------------------- 1876 // Remove a previously inserted signed compare loop exit. 1877 void PhaseIdealLoop::remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop) { 1878 Node* lp_proj = stay_in_loop(if_cmp, loop); 1879 assert(if_cmp->in(1)->in(1)->Opcode() == Op_CmpI && 1880 stay_in_loop(lp_proj, loop)->is_If() && 1881 stay_in_loop(lp_proj, loop)->in(1)->in(1)->Opcode() == Op_CmpU, "inserted cmpi before cmpu"); 1882 Node *con = _igvn.makecon(lp_proj->is_IfTrue() ? TypeInt::ONE : TypeInt::ZERO); 1883 set_ctrl(con, C->root()); 1884 if_cmp->set_req(1, con); 1885 } 1886 1887 //------------------------------ scheduled_nodelist ------------------------------------- 1888 // Create a post order schedule of nodes that are in the 1889 // "member" set. The list is returned in "sched". 1890 // The first node in "sched" is the loop head, followed by 1891 // nodes which have no inputs in the "member" set, and then 1892 // followed by the nodes that have an immediate input dependence 1893 // on a node in "sched". 
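// Implementation note: the walk below is an iterative DFS over def->use edges
// restricted to the "member" set, driven by an explicit Node_Stack rather than
// recursion (so a large loop body cannot overflow the native stack).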
1894 void PhaseIdealLoop::scheduled_nodelist( IdealLoopTree *loop, VectorSet& member, Node_List &sched ) { 1895 1896 assert(member.test(loop->_head->_idx), "loop head must be in member set"); 1897 Arena *a = Thread::current()->resource_area(); 1898 VectorSet visited(a); 1899 Node_Stack nstack(a, loop->_body.size()); 1900 1901 Node* n = loop->_head; // top of stack is cached in "n" 1902 uint idx = 0; 1903 visited.set(n->_idx); 1904 1905 // Initially push all with no inputs from within member set 1906 for(uint i = 0; i < loop->_body.size(); i++ ) { 1907 Node *elt = loop->_body.at(i); 1908 if (member.test(elt->_idx)) { 1909 bool found = false; 1910 for (uint j = 0; j < elt->req(); j++) { 1911 Node* def = elt->in(j); 1912 if (def && member.test(def->_idx) && def != elt) { 1913 found = true; 1914 break; 1915 } 1916 } 1917 if (!found && elt != loop->_head) { 1918 nstack.push(n, idx); 1919 n = elt; 1920 assert(!visited.test(n->_idx), "not seen yet"); 1921 visited.set(n->_idx); 1922 } 1923 } 1924 } 1925 1926 // traverse out's that are in the member set 1927 while (true) { 1928 if (idx < n->outcnt()) { 1929 Node* use = n->raw_out(idx); 1930 idx++; 1931 if (!visited.test_set(use->_idx)) { 1932 if (member.test(use->_idx)) { 1933 nstack.push(n, idx); 1934 n = use; 1935 idx = 0; 1936 } 1937 } 1938 } else { 1939 // All outputs processed 1940 sched.push(n); 1941 if (nstack.is_empty()) break; 1942 n = nstack.node(); 1943 idx = nstack.index(); 1944 nstack.pop(); 1945 } 1946 } 1947 } 1948 1949 1950 //------------------------------ has_use_in_set ------------------------------------- 1951 // Has a use in the vector set 1952 bool PhaseIdealLoop::has_use_in_set( Node* n, VectorSet& vset ) { 1953 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 1954 Node* use = n->fast_out(j); 1955 if (vset.test(use->_idx)) { 1956 return true; 1957 } 1958 } 1959 return false; 1960 } 1961 1962 1963 //------------------------------ has_use_internal_to_set ------------------------------------- 1964 // Has use internal to the vector set (ie. not in a phi at the loop head) 1965 bool PhaseIdealLoop::has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop ) { 1966 Node* head = loop->_head; 1967 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 1968 Node* use = n->fast_out(j); 1969 if (vset.test(use->_idx) && !(use->is_Phi() && use->in(0) == head)) { 1970 return true; 1971 } 1972 } 1973 return false; 1974 } 1975 1976 1977 //------------------------------ clone_for_use_outside_loop ------------------------------------- 1978 // clone "n" for uses that are outside of loop 1979 int PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) { 1980 int cloned = 0; 1981 assert(worklist.size() == 0, "should be empty"); 1982 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 1983 Node* use = n->fast_out(j); 1984 if( !loop->is_member(get_loop(has_ctrl(use) ? get_ctrl(use) : use)) ) { 1985 worklist.push(use); 1986 } 1987 } 1988 while( worklist.size() ) { 1989 Node *use = worklist.pop(); 1990 if (!has_node(use) || use->in(0) == C->top()) continue; 1991 uint j; 1992 for (j = 0; j < use->req(); j++) { 1993 if (use->in(j) == n) break; 1994 } 1995 assert(j < use->req(), "must be there"); 1996 1997 // clone "n" and insert it between the inputs of "n" and the use outside the loop 1998 Node* n_clone = n->clone(); 1999 _igvn.replace_input_of(use, j, n_clone); 2000 cloned++; 2001 Node* use_c; 2002 if (!use->is_Phi()) { 2003 use_c = has_ctrl(use) ? 
get_ctrl(use) : use->in(0); 2004 } else { 2005 // Use in a phi is considered a use in the associated predecessor block 2006 use_c = use->in(0)->in(j); 2007 } 2008 set_ctrl(n_clone, use_c); 2009 assert(!loop->is_member(get_loop(use_c)), "should be outside loop"); 2010 get_loop(use_c)->_body.push(n_clone); 2011 _igvn.register_new_node_with_optimizer(n_clone); 2012 #if !defined(PRODUCT) 2013 if (TracePartialPeeling) { 2014 tty->print_cr("loop exit cloning old: %d new: %d newbb: %d", n->_idx, n_clone->_idx, get_ctrl(n_clone)->_idx); 2015 } 2016 #endif 2017 } 2018 return cloned; 2019 } 2020 2021 2022 //------------------------------ clone_for_special_use_inside_loop ------------------------------------- 2023 // clone "n" for special uses that are in the not_peeled region. 2024 // If these def-uses occur in separate blocks, the code generator 2025 // marks the method as not compilable. For example, if a "BoolNode" 2026 // is in a different basic block than the "IfNode" that uses it, then 2027 // the compilation is aborted in the code generator. 2028 void PhaseIdealLoop::clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n, 2029 VectorSet& not_peel, Node_List& sink_list, Node_List& worklist ) { 2030 if (n->is_Phi() || n->is_Load()) { 2031 return; 2032 } 2033 assert(worklist.size() == 0, "should be empty"); 2034 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 2035 Node* use = n->fast_out(j); 2036 if ( not_peel.test(use->_idx) && 2037 (use->is_If() || use->is_CMove() || use->is_Bool()) && 2038 use->in(1) == n) { 2039 worklist.push(use); 2040 } 2041 } 2042 if (worklist.size() > 0) { 2043 // clone "n" and insert it between inputs of "n" and the use 2044 Node* n_clone = n->clone(); 2045 loop->_body.push(n_clone); 2046 _igvn.register_new_node_with_optimizer(n_clone); 2047 set_ctrl(n_clone, get_ctrl(n)); 2048 sink_list.push(n_clone); 2049 not_peel <<= n_clone->_idx; // add n_clone to not_peel set. 
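    // The original "n" keeps serving its uses in the peel region; the special
    // uses collected above (all in the not_peel region) are rewired to n_clone
    // below, so a def and its Bool/CMove/If style use do not end up in
    // different basic blocks (which the code generator rejects, as noted above).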
2050 #if !defined(PRODUCT) 2051 if (TracePartialPeeling) { 2052 tty->print_cr("special not_peeled cloning old: %d new: %d", n->_idx, n_clone->_idx); 2053 } 2054 #endif 2055 while( worklist.size() ) { 2056 Node *use = worklist.pop(); 2057 _igvn.rehash_node_delayed(use); 2058 for (uint j = 1; j < use->req(); j++) { 2059 if (use->in(j) == n) { 2060 use->set_req(j, n_clone); 2061 } 2062 } 2063 } 2064 } 2065 } 2066 2067 2068 //------------------------------ insert_phi_for_loop ------------------------------------- 2069 // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist 2070 void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp ) { 2071 Node *phi = PhiNode::make(lp, back_edge_val); 2072 phi->set_req(LoopNode::EntryControl, lp_entry_val); 2073 // Use existing phi if it already exists 2074 Node *hit = _igvn.hash_find_insert(phi); 2075 if( hit == NULL ) { 2076 _igvn.register_new_node_with_optimizer(phi); 2077 set_ctrl(phi, lp); 2078 } else { 2079 // Remove the new phi from the graph and use the hit 2080 _igvn.remove_dead_node(phi); 2081 phi = hit; 2082 } 2083 _igvn.replace_input_of(use, idx, phi); 2084 } 2085 2086 #ifdef ASSERT 2087 //------------------------------ is_valid_loop_partition ------------------------------------- 2088 // Validate the loop partition sets: peel and not_peel 2089 bool PhaseIdealLoop::is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list, 2090 VectorSet& not_peel ) { 2091 uint i; 2092 // Check that peel_list entries are in the peel set 2093 for (i = 0; i < peel_list.size(); i++) { 2094 if (!peel.test(peel_list.at(i)->_idx)) { 2095 return false; 2096 } 2097 } 2098 // Check at loop members are in one of peel set or not_peel set 2099 for (i = 0; i < loop->_body.size(); i++ ) { 2100 Node *def = loop->_body.at(i); 2101 uint di = def->_idx; 2102 // Check that peel set elements are in peel_list 2103 if (peel.test(di)) { 2104 if (not_peel.test(di)) { 2105 return false; 2106 } 2107 // Must be in peel_list also 2108 bool found = false; 2109 for (uint j = 0; j < peel_list.size(); j++) { 2110 if (peel_list.at(j)->_idx == di) { 2111 found = true; 2112 break; 2113 } 2114 } 2115 if (!found) { 2116 return false; 2117 } 2118 } else if (not_peel.test(di)) { 2119 if (peel.test(di)) { 2120 return false; 2121 } 2122 } else { 2123 return false; 2124 } 2125 } 2126 return true; 2127 } 2128 2129 //------------------------------ is_valid_clone_loop_exit_use ------------------------------------- 2130 // Ensure a use outside of loop is of the right form 2131 bool PhaseIdealLoop::is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx) { 2132 Node *use_c = has_ctrl(use) ? 
                                  get_ctrl(use) : use;
  return (use->is_Phi() &&
          use_c->is_Region() && use_c->req() == 3 &&
          (use_c->in(exit_idx)->Opcode() == Op_IfTrue ||
           use_c->in(exit_idx)->Opcode() == Op_IfFalse ||
           use_c->in(exit_idx)->Opcode() == Op_JumpProj) &&
          loop->is_member( get_loop( use_c->in(exit_idx)->in(0) ) ) );
}

//------------------------------ is_valid_clone_loop_form -------------------------------------
// Ensure that all uses outside of loop are of the right form
bool PhaseIdealLoop::is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
                                               uint orig_exit_idx, uint clone_exit_idx) {
  uint len = peel_list.size();
  for (uint i = 0; i < len; i++) {
    Node *def = peel_list.at(i);

    for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
      Node *use = def->fast_out(j);
      Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
      if (!loop->is_member(get_loop(use_c))) {
        // use is not in the loop, check for correct structure
        if (use->in(0) == def) {
          // Okay
        } else if (!is_valid_clone_loop_exit_use(loop, use, orig_exit_idx)) {
          return false;
        }
      }
    }
  }
  return true;
}
#endif

//------------------------------ partial_peel -------------------------------------
// Partially peel (aka loop rotation) the top portion of a loop (called
// the peel section below) by cloning it and placing one copy just before
// the new loop head and the other copy at the bottom of the new loop.
//
//    before                       after                where it came from
//
//    stmt1                        stmt1
//  loop:                          stmt2                     clone
//    stmt2                        if condA goto exitA       clone
//    if condA goto exitA        new_loop:                   new
//    stmt3                        stmt3                     clone
//    if !condB goto loop          if condB goto exitB       clone
//  exitB:                         stmt2                     orig
//    stmt4                        if !condA goto new_loop   orig
//  exitA:                         goto exitA
//                               exitB:
//                                 stmt4
//                               exitA:
//
// Step 1: find the cut point: an exit test on probable
//         induction variable.
// Step 2: schedule (with cloning) operations in the peel
//         section that can be executed after the cut into
//         the section that is not peeled.  This may need
//         to clone operations into exit blocks.  For
//         instance, a reference to A[i] in the not-peel
//         section and a reference to B[i] in an exit block
//         may cause a left-shift of i by 2 to be placed
//         in the peel block.  This step will clone the left
//         shift into the exit block and sink the left shift
//         from the peel to the not-peel section.
// Step 3: clone the loop, retarget the control, and insert
//         phis for values that are live across the new loop
//         head.  This is very dependent on the graph structure
//         from clone_loop.  It creates region nodes for
//         exit control and associated phi nodes for values
//         that flow out of the loop through that exit.  The region
//         node is dominated by the clone's control projection.
//         So the clone's peel section is placed before the
//         new loop head, and the clone's not-peel section
//         forms the top part of the new loop.  The original
//         peel section forms the tail of the new loop.
// Step 4: update the dominator tree and recompute the
//         dominator depth.
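//
// As a concrete (purely illustrative, made-up) Java-level example of the
// "before" shape above, with the exit test in the middle of the body:
//
//   while (true) {
//     i++;                      // stmt2
//     if (i >= limit) break;    // condA - an exit test on the probable iv
//     sum += a[i];              // stmt3
//   }
//
// Partial peeling runs stmt2 and condA once ahead of the loop and rotates the
// loop so that the copy of condA at the bottom becomes the backedge test, as
// in the "after" column above.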
2211 // 2212 // orig 2213 // 2214 // stmt1 2215 // | 2216 // v 2217 // loop predicate 2218 // | 2219 // v 2220 // loop<----+ 2221 // | | 2222 // stmt2 | 2223 // | | 2224 // v | 2225 // ifA | 2226 // / | | 2227 // v v | 2228 // false true ^ <-- last_peel 2229 // / | | 2230 // / ===|==cut | 2231 // / stmt3 | <-- first_not_peel 2232 // / | | 2233 // | v | 2234 // v ifB | 2235 // exitA: / \ | 2236 // / \ | 2237 // v v | 2238 // false true | 2239 // / \ | 2240 // / ----+ 2241 // | 2242 // v 2243 // exitB: 2244 // stmt4 2245 // 2246 // 2247 // after clone loop 2248 // 2249 // stmt1 2250 // | 2251 // v 2252 // loop predicate 2253 // / \ 2254 // clone / \ orig 2255 // / \ 2256 // / \ 2257 // v v 2258 // +---->loop loop<----+ 2259 // | | | | 2260 // | stmt2 stmt2 | 2261 // | | | | 2262 // | v v | 2263 // | ifA ifA | 2264 // | | \ / | | 2265 // | v v v v | 2266 // ^ true false false true ^ <-- last_peel 2267 // | | ^ \ / | | 2268 // | cut==|== \ \ / ===|==cut | 2269 // | stmt3 \ \ / stmt3 | <-- first_not_peel 2270 // | | dom | | | | 2271 // | v \ 1v v2 v | 2272 // | ifB regionA ifB | 2273 // | / \ | / \ | 2274 // | / \ v / \ | 2275 // | v v exitA: v v | 2276 // | true false false true | 2277 // | / ^ \ / \ | 2278 // +---- \ \ / ----+ 2279 // dom \ / 2280 // \ 1v v2 2281 // regionB 2282 // | 2283 // v 2284 // exitB: 2285 // stmt4 2286 // 2287 // 2288 // after partial peel 2289 // 2290 // stmt1 2291 // | 2292 // v 2293 // loop predicate 2294 // / 2295 // clone / orig 2296 // / TOP 2297 // / \ 2298 // v v 2299 // TOP->loop loop----+ 2300 // | | | 2301 // stmt2 stmt2 | 2302 // | | | 2303 // v v | 2304 // ifA ifA | 2305 // | \ / | | 2306 // v v v v | 2307 // true false false true | <-- last_peel 2308 // | ^ \ / +------|---+ 2309 // +->newloop \ \ / === ==cut | | 2310 // | stmt3 \ \ / TOP | | 2311 // | | dom | | stmt3 | | <-- first_not_peel 2312 // | v \ 1v v2 v | | 2313 // | ifB regionA ifB ^ v 2314 // | / \ | / \ | | 2315 // | / \ v / \ | | 2316 // | v v exitA: v v | | 2317 // | true false false true | | 2318 // | / ^ \ / \ | | 2319 // | | \ \ / v | | 2320 // | | dom \ / TOP | | 2321 // | | \ 1v v2 | | 2322 // ^ v regionB | | 2323 // | | | | | 2324 // | | v ^ v 2325 // | | exitB: | | 2326 // | | stmt4 | | 2327 // | +------------>-----------------+ | 2328 // | | 2329 // +-----------------<---------------------+ 2330 // 2331 // 2332 // final graph 2333 // 2334 // stmt1 2335 // | 2336 // v 2337 // loop predicate 2338 // | 2339 // v 2340 // stmt2 clone 2341 // | 2342 // v 2343 // ........> ifA clone 2344 // : / | 2345 // dom / | 2346 // : v v 2347 // : false true 2348 // : | | 2349 // : | v 2350 // : | newloop<-----+ 2351 // : | | | 2352 // : | stmt3 clone | 2353 // : | | | 2354 // : | v | 2355 // : | ifB | 2356 // : | / \ | 2357 // : | v v | 2358 // : | false true | 2359 // : | | | | 2360 // : | v stmt2 | 2361 // : | exitB: | | 2362 // : | stmt4 v | 2363 // : | ifA orig | 2364 // : | / \ | 2365 // : | / \ | 2366 // : | v v | 2367 // : | false true | 2368 // : | / \ | 2369 // : v v -----+ 2370 // RegionA 2371 // | 2372 // v 2373 // exitA 2374 // 2375 bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) { 2376 2377 assert(!loop->_head->is_CountedLoop(), "Non-counted loop only"); 2378 if (!loop->_head->is_Loop()) { 2379 return false; } 2380 2381 LoopNode *head = loop->_head->as_Loop(); 2382 2383 if (head->is_partial_peel_loop() || head->partial_peel_has_failed()) { 2384 return false; 2385 } 2386 2387 // Check for complex exit control 2388 for(uint ii = 0; ii < loop->_body.size(); ii++ 
) { 2389 Node *n = loop->_body.at(ii); 2390 int opc = n->Opcode(); 2391 if (n->is_Call() || 2392 opc == Op_Catch || 2393 opc == Op_CatchProj || 2394 opc == Op_Jump || 2395 opc == Op_JumpProj) { 2396 #if !defined(PRODUCT) 2397 if (TracePartialPeeling) { 2398 tty->print_cr("\nExit control too complex: lp: %d", head->_idx); 2399 } 2400 #endif 2401 return false; 2402 } 2403 } 2404 2405 int dd = dom_depth(head); 2406 2407 // Step 1: find cut point 2408 2409 // Walk up dominators to loop head looking for first loop exit 2410 // which is executed on every path thru loop. 2411 IfNode *peel_if = NULL; 2412 IfNode *peel_if_cmpu = NULL; 2413 2414 Node *iff = loop->tail(); 2415 while( iff != head ) { 2416 if( iff->is_If() ) { 2417 Node *ctrl = get_ctrl(iff->in(1)); 2418 if (ctrl->is_top()) return false; // Dead test on live IF. 2419 // If loop-varying exit-test, check for induction variable 2420 if( loop->is_member(get_loop(ctrl)) && 2421 loop->is_loop_exit(iff) && 2422 is_possible_iv_test(iff)) { 2423 Node* cmp = iff->in(1)->in(1); 2424 if (cmp->Opcode() == Op_CmpI) { 2425 peel_if = iff->as_If(); 2426 } else { 2427 assert(cmp->Opcode() == Op_CmpU, "must be CmpI or CmpU"); 2428 peel_if_cmpu = iff->as_If(); 2429 } 2430 } 2431 } 2432 iff = idom(iff); 2433 } 2434 // Prefer signed compare over unsigned compare. 2435 IfNode* new_peel_if = NULL; 2436 if (peel_if == NULL) { 2437 if (!PartialPeelAtUnsignedTests || peel_if_cmpu == NULL) { 2438 return false; // No peel point found 2439 } 2440 new_peel_if = insert_cmpi_loop_exit(peel_if_cmpu, loop); 2441 if (new_peel_if == NULL) { 2442 return false; // No peel point found 2443 } 2444 peel_if = new_peel_if; 2445 } 2446 Node* last_peel = stay_in_loop(peel_if, loop); 2447 Node* first_not_peeled = stay_in_loop(last_peel, loop); 2448 if (first_not_peeled == NULL || first_not_peeled == head) { 2449 return false; 2450 } 2451 2452 #if !defined(PRODUCT) 2453 if (TraceLoopOpts) { 2454 tty->print("PartialPeel "); 2455 loop->dump_head(); 2456 } 2457 2458 if (TracePartialPeeling) { 2459 tty->print_cr("before partial peel one iteration"); 2460 Node_List wl; 2461 Node* t = head->in(2); 2462 while (true) { 2463 wl.push(t); 2464 if (t == head) break; 2465 t = idom(t); 2466 } 2467 while (wl.size() > 0) { 2468 Node* tt = wl.pop(); 2469 tt->dump(); 2470 if (tt == last_peel) tty->print_cr("-- cut --"); 2471 } 2472 } 2473 #endif 2474 ResourceArea *area = Thread::current()->resource_area(); 2475 VectorSet peel(area); 2476 VectorSet not_peel(area); 2477 Node_List peel_list(area); 2478 Node_List worklist(area); 2479 Node_List sink_list(area); 2480 2481 // Set of cfg nodes to peel are those that are executable from 2482 // the head through last_peel. 2483 assert(worklist.size() == 0, "should be empty"); 2484 worklist.push(head); 2485 peel.set(head->_idx); 2486 while (worklist.size() > 0) { 2487 Node *n = worklist.pop(); 2488 if (n != last_peel) { 2489 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 2490 Node* use = n->fast_out(j); 2491 if (use->is_CFG() && 2492 loop->is_member(get_loop(use)) && 2493 !peel.test_set(use->_idx)) { 2494 worklist.push(use); 2495 } 2496 } 2497 } 2498 } 2499 2500 // Set of non-cfg nodes to peel are those that are control 2501 // dependent on the cfg nodes. 2502 uint i; 2503 for(i = 0; i < loop->_body.size(); i++ ) { 2504 Node *n = loop->_body.at(i); 2505 Node *n_c = has_ctrl(n) ? 
get_ctrl(n) : n; 2506 if (peel.test(n_c->_idx)) { 2507 peel.set(n->_idx); 2508 } else { 2509 not_peel.set(n->_idx); 2510 } 2511 } 2512 2513 // Step 2: move operations from the peeled section down into the 2514 // not-peeled section 2515 2516 // Get a post order schedule of nodes in the peel region 2517 // Result in right-most operand. 2518 scheduled_nodelist(loop, peel, peel_list ); 2519 2520 assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition"); 2521 2522 // For future check for too many new phis 2523 uint old_phi_cnt = 0; 2524 for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) { 2525 Node* use = head->fast_out(j); 2526 if (use->is_Phi()) old_phi_cnt++; 2527 } 2528 2529 #if !defined(PRODUCT) 2530 if (TracePartialPeeling) { 2531 tty->print_cr("\npeeled list"); 2532 } 2533 #endif 2534 2535 // Evacuate nodes in peel region into the not_peeled region if possible 2536 uint new_phi_cnt = 0; 2537 uint cloned_for_outside_use = 0; 2538 for (i = 0; i < peel_list.size();) { 2539 Node* n = peel_list.at(i); 2540 #if !defined(PRODUCT) 2541 if (TracePartialPeeling) n->dump(); 2542 #endif 2543 bool incr = true; 2544 if ( !n->is_CFG() ) { 2545 2546 if ( has_use_in_set(n, not_peel) ) { 2547 2548 // If not used internal to the peeled region, 2549 // move "n" from peeled to not_peeled region. 2550 2551 if ( !has_use_internal_to_set(n, peel, loop) ) { 2552 2553 // if not pinned and not a load (which maybe anti-dependent on a store) 2554 // and not a CMove (Matcher expects only bool->cmove). 2555 if ( n->in(0) == NULL && !n->is_Load() && !n->is_CMove() ) { 2556 cloned_for_outside_use += clone_for_use_outside_loop( loop, n, worklist ); 2557 sink_list.push(n); 2558 peel >>= n->_idx; // delete n from peel set. 2559 not_peel <<= n->_idx; // add n to not_peel set. 2560 peel_list.remove(i); 2561 incr = false; 2562 #if !defined(PRODUCT) 2563 if (TracePartialPeeling) { 2564 tty->print_cr("sink to not_peeled region: %d newbb: %d", 2565 n->_idx, get_ctrl(n)->_idx); 2566 } 2567 #endif 2568 } 2569 } else { 2570 // Otherwise check for special def-use cases that span 2571 // the peel/not_peel boundary such as bool->if 2572 clone_for_special_use_inside_loop( loop, n, not_peel, sink_list, worklist ); 2573 new_phi_cnt++; 2574 } 2575 } 2576 } 2577 if (incr) i++; 2578 } 2579 2580 if (new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta) { 2581 #if !defined(PRODUCT) 2582 if (TracePartialPeeling) { 2583 tty->print_cr("\nToo many new phis: %d old %d new cmpi: %c", 2584 new_phi_cnt, old_phi_cnt, new_peel_if != NULL?'T':'F'); 2585 } 2586 #endif 2587 if (new_peel_if != NULL) { 2588 remove_cmpi_loop_exit(new_peel_if, loop); 2589 } 2590 // Inhibit more partial peeling on this loop 2591 assert(!head->is_partial_peel_loop(), "not partial peeled"); 2592 head->mark_partial_peel_failed(); 2593 if (cloned_for_outside_use > 0) { 2594 // Terminate this round of loop opts because 2595 // the graph outside this loop was changed. 2596 C->set_major_progress(); 2597 return true; 2598 } 2599 return false; 2600 } 2601 2602 // Step 3: clone loop, retarget control, and insert new phis 2603 2604 // Create new loop head for new phis and to hang 2605 // the nodes being moved (sinked) from the peel region. 
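  // Note: both control inputs of new_head start out as last_peel; after
  // clone_loop the copy that actually survives is new_head_clone, and the
  // original new_head has its entry and backedge cut to top further down.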
  LoopNode* new_head = new (C) LoopNode(last_peel, last_peel);
  new_head->set_unswitch_count(head->unswitch_count()); // Preserve
  _igvn.register_new_node_with_optimizer(new_head);
  assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled");
  first_not_peeled->set_req(0, new_head);
  set_loop(new_head, loop);
  loop->_body.push(new_head);
  not_peel.set(new_head->_idx);
  set_idom(new_head, last_peel, dom_depth(first_not_peeled));
  set_idom(first_not_peeled, new_head, dom_depth(first_not_peeled));

  while (sink_list.size() > 0) {
    Node* n = sink_list.pop();
    set_ctrl(n, new_head);
  }

  assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");

  clone_loop( loop, old_new, dd );

  const uint clone_exit_idx = 1;
  const uint orig_exit_idx = 2;
  assert(is_valid_clone_loop_form( loop, peel_list, orig_exit_idx, clone_exit_idx ), "bad clone loop");

  Node* head_clone = old_new[head->_idx];
  LoopNode* new_head_clone = old_new[new_head->_idx]->as_Loop();
  Node* orig_tail_clone = head_clone->in(2);

  // Add phi if "def" node is in peel set and "use" is not

  for(i = 0; i < peel_list.size(); i++ ) {
    Node *def = peel_list.at(i);
    if (!def->is_CFG()) {
      for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
        Node *use = def->fast_out(j);
        if (has_node(use) && use->in(0) != C->top() &&
            (!peel.test(use->_idx) ||
             (use->is_Phi() && use->in(0) == head)) ) {
          worklist.push(use);
        }
      }
      while( worklist.size() ) {
        Node *use = worklist.pop();
        for (uint j = 1; j < use->req(); j++) {
          Node* n = use->in(j);
          if (n == def) {

            // "def" is in peel set, "use" is not in peel set
            // or "use" is in the entry boundary (a phi) of the peel set

            Node* use_c = has_ctrl(use) ? get_ctrl(use) : use;

            if ( loop->is_member(get_loop( use_c )) ) {
              // use is in loop
              if (old_new[use->_idx] != NULL) { // null for dead code
                Node* use_clone = old_new[use->_idx];
                _igvn.replace_input_of(use, j, C->top());
                insert_phi_for_loop( use_clone, j, old_new[def->_idx], def, new_head_clone );
              }
            } else {
              assert(is_valid_clone_loop_exit_use(loop, use, orig_exit_idx), "clone loop format");
              // use is not in the loop, check if the live range includes the cut
              Node* lp_if = use_c->in(orig_exit_idx)->in(0);
              if (not_peel.test(lp_if->_idx)) {
                assert(j == orig_exit_idx, "use from original loop");
                insert_phi_for_loop( use, clone_exit_idx, old_new[def->_idx], def, new_head_clone );
              }
            }
          }
        }
      }
    }
  }

  // Step 3b: retarget control

  // Redirect control to the new loop head if a cloned node in
  // the not_peeled region has control that points into the peeled region.
  // This is necessary because the cloned peeled region will be outside
  // the loop.
2686 // from to 2687 // cloned-peeled <---+ 2688 // new_head_clone: | <--+ 2689 // cloned-not_peeled in(0) in(0) 2690 // orig-peeled 2691 2692 for(i = 0; i < loop->_body.size(); i++ ) { 2693 Node *n = loop->_body.at(i); 2694 if (!n->is_CFG() && n->in(0) != NULL && 2695 not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) { 2696 Node* n_clone = old_new[n->_idx]; 2697 _igvn.replace_input_of(n_clone, 0, new_head_clone); 2698 } 2699 } 2700 2701 // Backedge of the surviving new_head (the clone) is original last_peel 2702 _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel); 2703 2704 // Cut first node in original not_peel set 2705 _igvn.rehash_node_delayed(new_head); // Multiple edge updates: 2706 new_head->set_req(LoopNode::EntryControl, C->top()); // use rehash_node_delayed / set_req instead of 2707 new_head->set_req(LoopNode::LoopBackControl, C->top()); // multiple replace_input_of calls 2708 2709 // Copy head_clone back-branch info to original head 2710 // and remove original head's loop entry and 2711 // clone head's back-branch 2712 _igvn.rehash_node_delayed(head); // Multiple edge updates 2713 head->set_req(LoopNode::EntryControl, head_clone->in(LoopNode::LoopBackControl)); 2714 head->set_req(LoopNode::LoopBackControl, C->top()); 2715 _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top()); 2716 2717 // Similarly modify the phis 2718 for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) { 2719 Node* use = head->fast_out(k); 2720 if (use->is_Phi() && use->outcnt() > 0) { 2721 Node* use_clone = old_new[use->_idx]; 2722 _igvn.rehash_node_delayed(use); // Multiple edge updates 2723 use->set_req(LoopNode::EntryControl, use_clone->in(LoopNode::LoopBackControl)); 2724 use->set_req(LoopNode::LoopBackControl, C->top()); 2725 _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top()); 2726 } 2727 } 2728 2729 // Step 4: update dominator tree and dominator depth 2730 2731 set_idom(head, orig_tail_clone, dd); 2732 recompute_dom_depth(); 2733 2734 // Inhibit more partial peeling on this loop 2735 new_head_clone->set_partial_peel_loop(); 2736 C->set_major_progress(); 2737 loop->record_for_igvn(); 2738 2739 #if !defined(PRODUCT) 2740 if (TracePartialPeeling) { 2741 tty->print_cr("\nafter partial peel one iteration"); 2742 Node_List wl(area); 2743 Node* t = last_peel; 2744 while (true) { 2745 wl.push(t); 2746 if (t == head_clone) break; 2747 t = idom(t); 2748 } 2749 while (wl.size() > 0) { 2750 Node* tt = wl.pop(); 2751 if (tt == head) tty->print_cr("orig head"); 2752 else if (tt == new_head_clone) tty->print_cr("new head"); 2753 else if (tt == head_clone) tty->print_cr("clone head"); 2754 tt->dump(); 2755 } 2756 } 2757 #endif 2758 return true; 2759 } 2760 2761 //------------------------------reorg_offsets---------------------------------- 2762 // Reorganize offset computations to lower register pressure. Mostly 2763 // prevent loop-fallout uses of the pre-incremented trip counter (which are 2764 // then alive with the post-incremented trip counter forcing an extra 2765 // register move) 2766 void PhaseIdealLoop::reorg_offsets(IdealLoopTree *loop) { 2767 // Perform it only for canonical counted loops. 2768 // Loop's shape could be messed up by iteration_split_impl. 
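  // Concretely, for every loop-invariant use of the trip-counter Phi that is
  // dominated by the loop exit, the code below substitutes an equivalent value
  // computed from the post-incremented counter:
  //
  //   post = AddI(Opaque2(cle->incr()), -stride)    // same value as the Phi
  //
  // The Opaque2 node keeps IGVN from folding this straight back into the Phi,
  // so only the post-incremented trip counter stays live out of the loop.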
  if (!loop->_head->is_CountedLoop())
    return;
  if (!loop->_head->as_Loop()->is_valid_counted_loop())
    return;

  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  CountedLoopEndNode *cle = cl->loopexit();
  Node *exit = cle->proj_out(false);
  Node *phi = cl->phi();

  // Check for the special case of folks using the pre-incremented
  // trip-counter on the fall-out path (forces the pre-incremented
  // and post-incremented trip counter to be live at the same time).
  // Fix this by adjusting to use the post-increment trip counter.

  bool progress = true;
  while (progress) {
    progress = false;
    for (DUIterator_Fast imax, i = phi->fast_outs(imax); i < imax; i++) {
      Node* use = phi->fast_out(i);   // User of trip-counter
      if (!has_ctrl(use))  continue;
      Node *u_ctrl = get_ctrl(use);
      if (use->is_Phi()) {
        u_ctrl = NULL;
        for (uint j = 1; j < use->req(); j++)
          if (use->in(j) == phi)
            u_ctrl = dom_lca(u_ctrl, use->in(0)->in(j));
      }
      IdealLoopTree *u_loop = get_loop(u_ctrl);
      // Look for loop-invariant use
      if (u_loop == loop) continue;
      if (loop->is_member(u_loop)) continue;
      // Check that use is live out the bottom.  Assuming the trip-counter
      // update is right at the bottom, uses of the loop middle are ok.
      if (dom_lca(exit, u_ctrl) != exit) continue;
      // Hit!  Refactor use to use the post-incremented tripcounter.
      // Compute a post-increment tripcounter.
      Node *opaq = new (C) Opaque2Node( C, cle->incr() );
      register_new_node(opaq, exit);
      Node *neg_stride = _igvn.intcon(-cle->stride_con());
      set_ctrl(neg_stride, C->root());
      Node *post = new (C) AddINode( opaq, neg_stride);
      register_new_node(post, exit);
      _igvn.rehash_node_delayed(use);
      for (uint j = 1; j < use->req(); j++) {
        if (use->in(j) == phi)
          use->set_req(j, post);
      }
      // Since DU info changed, rerun loop
      progress = true;
      break;
    }
  }

}