/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/matcher.hpp"
#include "opto/mulnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"

//=============================================================================
//------------------------------split_thru_phi---------------------------------
// Split Node 'n' through merge point if there is enough win.
Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
  if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
    // ConvI2L may have type information on it which is unsafe to push up
    // so disable this for now
    return NULL;
  }

  // Splitting range check CastIIs through a loop induction Phi can
  // cause new Phis to be created that are left unrelated to the loop
  // induction Phi and prevent optimizations (vectorization)
  if (n->Opcode() == Op_CastII && n->as_CastII()->has_range_check() &&
      region->is_CountedLoop() && n->in(1) == region->as_CountedLoop()->phi()) {
    return NULL;
  }

  int wins = 0;
  assert(!n->is_CFG(), "");
  assert(region->is_Region(), "");

  const Type* type = n->bottom_type();
  const TypeOopPtr *t_oop = _igvn.type(n)->isa_oopptr();
  Node *phi;
  if (t_oop != NULL && t_oop->is_known_instance_field()) {
    int iid    = t_oop->instance_id();
    int index  = C->get_alias_index(t_oop);
    int offset = t_oop->offset();
    phi = new (C) PhiNode(region, type, NULL, iid, index, offset);
  } else {
    phi = PhiNode::make_blank(region, n);
  }
  uint old_unique = C->unique();
  for (uint i = 1; i < region->req(); i++) {
    Node *x;
    Node* the_clone = NULL;
    if (region->in(i) == C->top()) {
      x = C->top();             // Dead path?  Use a dead data op
    } else {
      x = n->clone();           // Else clone up the data op
      the_clone = x;            // Remember for possible deletion.
      // Alter data node to use pre-phi inputs
      if (n->in(0) == region)
        x->set_req( 0, region->in(i) );
      for (uint j = 1; j < n->req(); j++) {
        Node *in = n->in(j);
        if (in->is_Phi() && in->in(0) == region)
          x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone
      }
    }
    // Check for a 'win' on some paths
    const Type *t = x->Value(&_igvn);

    bool singleton = t->singleton();

    // A TOP singleton indicates that there are no possible values incoming
    // along a particular edge. In most cases, this is OK, and the Phi will
    // be eliminated later in an Ideal call. However, we can't allow this to
    // happen if the singleton occurs on loop entry, as the elimination of
    // the PhiNode may cause the resulting node to migrate back to a previous
    // loop iteration.
    if (singleton && t == Type::TOP) {
      // Is_Loop() == false does not confirm the absence of a loop (e.g., an
      // irreducible loop may not be indicated by an affirmative is_Loop());
      // therefore, the only top we can split thru a phi is on a backedge of
      // a loop.
      singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
    }

    if (singleton) {
      wins++;
      x = ((PhaseGVN&)_igvn).makecon(t);
    } else {
      // We now call Identity to try to simplify the cloned node.
      // Note that some Identity methods call phase->type(this).
      // Make sure that the type array is big enough for
      // our new node, even though we may throw the node away.
      // (Note: This tweaking with igvn only works because x is a new node.)
      _igvn.set_type(x, t);
      // If x is a TypeNode, capture any more-precise type permanently into Node
      // otherwise it will not be updated during igvn->transform since
      // igvn->type(x) is set to x->Value() already.
      x->raise_bottom_type(t);
      Node *y = x->Identity(&_igvn);
      if (y != x) {
        wins++;
        x = y;
      } else {
        y = _igvn.hash_find(x);
        if (y) {
          wins++;
          x = y;
        } else {
          // Else x is a new node we are keeping
          // We do not need register_new_node_with_optimizer
          // because set_type has already been called.
          _igvn._worklist.push(x);
        }
      }
    }
    if (x != the_clone && the_clone != NULL)
      _igvn.remove_dead_node(the_clone);
    phi->set_req( i, x );
  }
  // Too few wins?
  if (wins <= policy) {
    _igvn.remove_dead_node(phi);
    return NULL;
  }

  // Record Phi
  register_new_node( phi, region );

  for (uint i2 = 1; i2 < phi->req(); i2++) {
    Node *x = phi->in(i2);
    // If we commoned up the cloned 'x' with another existing Node,
    // the existing Node picks up a new use.  We need to make the
    // existing Node occur higher up so it dominates its uses.
    Node *old_ctrl;
    IdealLoopTree *old_loop;

    if (x->is_Con()) {
      // Constant's control is always root.
      set_ctrl(x, C->root());
      continue;
    }
    // The occasional new node
    if (x->_idx >= old_unique) {     // Found a new, unplaced node?
      old_ctrl = NULL;
      old_loop = NULL;               // Not in any prior loop
    } else {
      old_ctrl = get_ctrl(x);
      old_loop = get_loop(old_ctrl); // Get prior loop
    }
    // New late point must dominate new use
    Node *new_ctrl = dom_lca(old_ctrl, region->in(i2));
    if (new_ctrl == old_ctrl) // Nothing is changed
      continue;

    IdealLoopTree *new_loop = get_loop(new_ctrl);

    // Don't move x into a loop if its uses are
    // outside of loop. Otherwise x will be cloned
    // for each use outside of this loop.
    IdealLoopTree *use_loop = get_loop(region);
    if (!new_loop->is_member(use_loop) &&
        (old_loop == NULL || !new_loop->is_member(old_loop))) {
      // Take early control, later control will be recalculated
      // during next iteration of loop optimizations.
      new_ctrl = get_early_ctrl(x);
      new_loop = get_loop(new_ctrl);
    }
    // Set new location
    set_ctrl(x, new_ctrl);
    // If changing loop bodies, see if we need to collect into new body
    if (old_loop != new_loop) {
      if (old_loop && !old_loop->_child)
        old_loop->_body.yank(x);
      if (!new_loop->_child)
        new_loop->_body.push(x); // Collect body info
    }
  }

  return phi;
}

//------------------------------dominated_by------------------------------------
// Replace the dominated test with an obvious true or false.  Place it on the
// IGVN worklist for later cleanup.  Move control-dependent data Nodes on the
// live path up to the dominating control.
void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip, bool exclude_loop_predicate ) {
#ifndef PRODUCT
  if (VerifyLoopOptimizations && PrintOpto) tty->print_cr("dominating test");
#endif


  // prevdom is the dominating projection of the dominating test.
  assert( iff->is_If(), "" );
  assert( iff->Opcode() == Op_If || iff->Opcode() == Op_CountedLoopEnd, "Check this code when new subtype is added");
  int pop = prevdom->Opcode();
  assert( pop == Op_IfFalse || pop == Op_IfTrue, "" );
  if (flip) {
    if (pop == Op_IfTrue)
      pop = Op_IfFalse;
    else
      pop = Op_IfTrue;
  }
  // 'con' is set to true or false to kill the dominated test.
  Node *con = _igvn.makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
  set_ctrl(con, C->root()); // Constant gets a new use
  // Hack the dominated test
  _igvn.replace_input_of(iff, 1, con);

  // If I don't have a reachable TRUE and FALSE path following the IfNode then
  // I can assume this path reaches an infinite loop.  In this case it's not
  // important to optimize the data Nodes - either the whole compilation will
  // be tossed or this path (and all data Nodes) will go dead.
  if (iff->outcnt() != 2) return;

  // Make control-dependent data Nodes on the live path (path that will remain
  // once the dominated IF is removed) become control-dependent on the
  // dominating projection.
  Node* dp = iff->as_If()->proj_out(pop == Op_IfTrue);

  // Loop predicates may have dependent checks which should not
  // be skipped. For example, a range check predicate has two checks
  // for lower and upper bounds.
  if (dp == NULL)
    return;

  ProjNode* dp_proj  = dp->as_Proj();
  ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj();
  if (exclude_loop_predicate &&
      (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) ||
       unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check))) {
    // If this is a range check (IfNode::is_range_check), do not
    // reorder because Compile::allow_range_check_smearing might have
    // changed the check.
    return; // Let IGVN transformation change control dependence.
  }

  IdealLoopTree *old_loop = get_loop(dp);

  for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
    Node* cd = dp->fast_out(i); // Control-dependent node
    if (cd->depends_only_on_test()) {
      assert(cd->in(0) == dp, "");
      _igvn.replace_input_of(cd, 0, prevdom);
      set_early_ctrl(cd);
      IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
      if (old_loop != new_loop) {
        if (!old_loop->_child) old_loop->_body.yank(cd);
        if (!new_loop->_child) new_loop->_body.push(cd);
      }
      --i;
      --imax;
    }
  }
}

//------------------------------has_local_phi_input----------------------------
// Return TRUE if 'n' has Phi inputs from its local block and no other
// block-local inputs (all non-local-phi inputs come from earlier blocks)
Node *PhaseIdealLoop::has_local_phi_input( Node *n ) {
  Node *n_ctrl = get_ctrl(n);
  // See if some inputs come from a Phi in this block, or from before
  // this block.
  uint i;
  for( i = 1; i < n->req(); i++ ) {
    Node *phi = n->in(i);
    if( phi->is_Phi() && phi->in(0) == n_ctrl )
      break;
  }
  if( i >= n->req() )
    return NULL;                // No Phi inputs; nowhere to clone thru

  // Check for inputs created between 'n' and the Phi input.  These
  // must split as well; they have already been given the chance
  // (courtesy of a post-order visit) and since they did not we must
  // recover the 'cost' of splitting them by being very profitable
  // when splitting 'n'.  Since this is unlikely we simply give up.
  for( i = 1; i < n->req(); i++ ) {
    Node *m = n->in(i);
    if( get_ctrl(m) == n_ctrl && !m->is_Phi() ) {
      // We allow the special case of AddP's with no local inputs.
      // This allows us to split-up address expressions.
      if (m->is_AddP() &&
          get_ctrl(m->in(2)) != n_ctrl &&
          get_ctrl(m->in(3)) != n_ctrl) {
        // Move the AddP up to dominating point
        set_ctrl_and_loop(m, find_non_split_ctrl(idom(n_ctrl)));
        continue;
      }
      return NULL;
    }
    assert(n->is_Phi() || m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control");
  }

  return n_ctrl;
}

//------------------------------remix_address_expressions----------------------
// Rework addressing expressions to get the most loop-invariant stuff
// moved out.  We'd like to do all associative operators, but it's especially
// important (common) to do address expressions.
Node *PhaseIdealLoop::remix_address_expressions( Node *n ) {
  if (!has_ctrl(n)) return NULL;
  Node *n_ctrl = get_ctrl(n);
  IdealLoopTree *n_loop = get_loop(n_ctrl);

  // See if 'n' mixes loop-varying and loop-invariant inputs and
  // itself is loop-varying.

  // Only interested in binary ops (and AddP)
  if( n->req() < 3 || n->req() > 4 ) return NULL;

  Node *n1_ctrl = get_ctrl(n->in( 1));
  Node *n2_ctrl = get_ctrl(n->in( 2));
  Node *n3_ctrl = get_ctrl(n->in(n->req() == 3 ? 2 : 3));
  IdealLoopTree *n1_loop = get_loop( n1_ctrl );
  IdealLoopTree *n2_loop = get_loop( n2_ctrl );
  IdealLoopTree *n3_loop = get_loop( n3_ctrl );

  // Does one of my inputs spin in a tighter loop than self?
  if( (n_loop->is_member( n1_loop ) && n_loop != n1_loop) ||
      (n_loop->is_member( n2_loop ) && n_loop != n2_loop) ||
      (n_loop->is_member( n3_loop ) && n_loop != n3_loop) )
    return NULL;                // Leave well enough alone

  // Is at least one of my inputs loop-invariant?
  if( n1_loop == n_loop &&
      n2_loop == n_loop &&
      n3_loop == n_loop )
    return NULL;                // No loop-invariant inputs


  int n_op = n->Opcode();

  // Replace expressions like ((V+I) << 2) with (V<<2 + I<<2).
  if( n_op == Op_LShiftI ) {
    // Scale is loop invariant
    Node *scale = n->in(2);
    Node *scale_ctrl = get_ctrl(scale);
    IdealLoopTree *scale_loop = get_loop(scale_ctrl );
    if( n_loop == scale_loop || !scale_loop->is_member( n_loop ) )
      return NULL;
    const TypeInt *scale_t = scale->bottom_type()->isa_int();
    if( scale_t && scale_t->is_con() && scale_t->get_con() >= 16 )
      return NULL;              // Don't bother with byte/short masking
    // Add must vary with loop (else shift would be loop-invariant)
    Node *add = n->in(1);
    Node *add_ctrl = get_ctrl(add);
    IdealLoopTree *add_loop = get_loop(add_ctrl);
    //assert( n_loop == add_loop, "" );
    if( n_loop != add_loop ) return NULL;  // happens w/ evil ZKM loops

    // Convert I-V into I+ (0-V); same for V-I
    if( add->Opcode() == Op_SubI &&
        _igvn.type( add->in(1) ) != TypeInt::ZERO ) {
      Node *zero = _igvn.intcon(0);
      set_ctrl(zero, C->root());
      Node *neg = new (C) SubINode( _igvn.intcon(0), add->in(2) );
      register_new_node( neg, get_ctrl(add->in(2) ) );
      add = new (C) AddINode( add->in(1), neg );
      register_new_node( add, add_ctrl );
    }
    if( add->Opcode() != Op_AddI ) return NULL;
    // See if one add input is loop invariant
    Node *add_var = add->in(1);
    Node *add_var_ctrl = get_ctrl(add_var);
    IdealLoopTree *add_var_loop = get_loop(add_var_ctrl );
    Node *add_invar = add->in(2);
    Node *add_invar_ctrl = get_ctrl(add_invar);
    IdealLoopTree *add_invar_loop = get_loop(add_invar_ctrl );
    if( add_var_loop == n_loop ) {
    } else if( add_invar_loop == n_loop ) {
      // Swap to find the invariant part
      add_invar = add_var;
      add_invar_ctrl = add_var_ctrl;
      add_invar_loop = add_var_loop;
      add_var = add->in(2);
      Node *add_var_ctrl = get_ctrl(add_var);
      IdealLoopTree *add_var_loop = get_loop(add_var_ctrl );
    } else                      // Else neither input is loop invariant
      return NULL;
    if( n_loop == add_invar_loop || !add_invar_loop->is_member( n_loop ) )
      return NULL;              // No invariant part of the add?

    // Yes!  Reshape address expression!
    Node *inv_scale = new (C) LShiftINode( add_invar, scale );
    Node *inv_scale_ctrl =
      dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
      add_invar_ctrl : scale_ctrl;
    register_new_node( inv_scale, inv_scale_ctrl );
    Node *var_scale = new (C) LShiftINode( add_var, scale );
    register_new_node( var_scale, n_ctrl );
    Node *var_add = new (C) AddINode( var_scale, inv_scale );
    register_new_node( var_add, n_ctrl );
    _igvn.replace_node( n, var_add );
    return var_add;
  }

  // Replace (I+V) with (V+I)
  if( n_op == Op_AddI ||
      n_op == Op_AddL ||
      n_op == Op_AddF ||
      n_op == Op_AddD ||
      n_op == Op_MulI ||
      n_op == Op_MulL ||
      n_op == Op_MulF ||
      n_op == Op_MulD ) {
    if( n2_loop == n_loop ) {
      assert( n1_loop != n_loop, "" );
      n->swap_edges(1, 2);
    }
  }

  // Replace ((I1 +p V) +p I2) with ((I1 +p I2) +p V),
  // but not if I2 is a constant.
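  // Here V is the loop-varying input and I1/I2 are loop-invariant; folding
  // the invariant parts into a single (I1 +p I2) lets that AddP be placed in
  // the loop preheader, leaving only one varying add inside the loop.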
  if( n_op == Op_AddP ) {
    if( n2_loop == n_loop && n3_loop != n_loop ) {
      if( n->in(2)->Opcode() == Op_AddP && !n->in(3)->is_Con() ) {
        Node *n22_ctrl = get_ctrl(n->in(2)->in(2));
        Node *n23_ctrl = get_ctrl(n->in(2)->in(3));
        IdealLoopTree *n22loop = get_loop( n22_ctrl );
        IdealLoopTree *n23_loop = get_loop( n23_ctrl );
        if( n22loop != n_loop && n22loop->is_member(n_loop) &&
            n23_loop == n_loop ) {
          Node *add1 = new (C) AddPNode( n->in(1), n->in(2)->in(2), n->in(3) );
          // Stuff new AddP in the loop preheader
          register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) );
          Node *add2 = new (C) AddPNode( n->in(1), add1, n->in(2)->in(3) );
          register_new_node( add2, n_ctrl );
          _igvn.replace_node( n, add2 );
          return add2;
        }
      }
    }

    // Replace (I1 +p (I2 + V)) with ((I1 +p I2) +p V)
    if( n2_loop != n_loop && n3_loop == n_loop ) {
      if( n->in(3)->Opcode() == Op_AddI ) {
        Node *V = n->in(3)->in(1);
        Node *I = n->in(3)->in(2);
        if( is_member(n_loop,get_ctrl(V)) ) {
        } else {
          Node *tmp = V; V = I; I = tmp;
        }
        if( !is_member(n_loop,get_ctrl(I)) ) {
          Node *add1 = new (C) AddPNode( n->in(1), n->in(2), I );
          // Stuff new AddP in the loop preheader
          register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) );
          Node *add2 = new (C) AddPNode( n->in(1), add1, V );
          register_new_node( add2, n_ctrl );
          _igvn.replace_node( n, add2 );
          return add2;
        }
      }
    }
  }

  return NULL;
}

//------------------------------conditional_move-------------------------------
// Attempt to replace a Phi with a conditional move.  We have some pretty
// strict profitability requirements.  All Phis at the merge point must
// be converted, so we can remove the control flow.  We need to limit the
// number of c-moves to a small handful.  All code that was in the side-arms
// of the CFG diamond is now speculatively executed.  This code has to be
// "cheap enough".  We are pretty much limited to CFG diamonds that merge
// 1 or 2 items with a total of 1 or 2 ops executed speculatively.
Node *PhaseIdealLoop::conditional_move( Node *region ) {

  assert(region->is_Region(), "sanity check");
  if (region->req() != 3) return NULL;

  // Check for CFG diamond
  Node *lp = region->in(1);
  Node *rp = region->in(2);
  if (!lp || !rp) return NULL;
  Node *lp_c = lp->in(0);
  if (lp_c == NULL || lp_c != rp->in(0) || !lp_c->is_If()) return NULL;
  IfNode *iff = lp_c->as_If();

  // Check for ops pinned in an arm of the diamond.
  // Can't remove the control flow in this case
  if (lp->outcnt() > 1) return NULL;
  if (rp->outcnt() > 1) return NULL;

  IdealLoopTree* r_loop = get_loop(region);
  assert(r_loop == get_loop(iff), "sanity");
  // Always convert to CMOVE if all results are used only outside this loop.
  bool used_inside_loop = (r_loop == _ltree_root);

  // Check profitability
  int cost = 0;
  int phis = 0;
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    Node *out = region->fast_out(i);
    if (!out->is_Phi()) continue; // Ignore other control edges, etc
    phis++;
    PhiNode* phi = out->as_Phi();
    BasicType bt = phi->type()->basic_type();
    switch (bt) {
    case T_FLOAT:
    case T_DOUBLE: {
      cost += Matcher::float_cmove_cost(); // Could be very expensive
      break;
    }
    case T_LONG: {
      cost += Matcher::long_cmove_cost(); // May encode as 2 CMOVs
    }
    case T_INT:                 // These all CMOV fine
    case T_ADDRESS: {           // (RawPtr)
      cost++;
      break;
    }
    case T_NARROWOOP: // Fall through
    case T_OBJECT: {            // Base oops are OK, but not derived oops
      const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
      // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
      // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
      // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
      // have a Phi for the base here that we convert to a CMOVE all is well
      // and good.  But if the base is dead, we'll not make a CMOVE.  Later
      // the allocator will have to produce a base by creating a CMOVE of the
      // relevant bases.  This puts the allocator in the business of
      // manufacturing expensive instructions, generally a bad plan.
      // Just Say No to Conditionally-Moved Derived Pointers.
      if (tp && tp->offset() != 0)
        return NULL;
      cost++;
      break;
    }
    default:
      return NULL;              // In particular, can't do memory or I/O
    }
    // Add in cost any speculative ops
    for (uint j = 1; j < region->req(); j++) {
      Node *proj = region->in(j);
      Node *inp = phi->in(j);
      if (get_ctrl(inp) == proj) { // Found local op
        cost++;
        // Check for a chain of dependent ops; these will all become
        // speculative in a CMOV.
        for (uint k = 1; k < inp->req(); k++)
          if (get_ctrl(inp->in(k)) == proj)
            cost += ConditionalMoveLimit; // Too much speculative goo
      }
    }
    // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
    // This will likely Split-If, a higher-payoff operation.
    for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
      Node* use = phi->fast_out(k);
      if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
        cost += ConditionalMoveLimit;
      // Is there a use inside the loop?
      // Note: check only basic types since CMoveP is pinned.
      if (!used_inside_loop && is_java_primitive(bt)) {
        IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
        if (r_loop == u_loop || r_loop->is_member(u_loop)) {
          used_inside_loop = true;
        }
      }
    }
  }
  Node* bol = iff->in(1);
  assert(bol->Opcode() == Op_Bool, "");
  int cmp_op = bol->in(1)->Opcode();
  // It is expensive to generate flags from a float compare.
  // Avoid duplicated float compare.
  if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return NULL;

  float infrequent_prob = PROB_UNLIKELY_MAG(3);
  // Ignore cost and block frequency if CMOVE can be moved outside the loop.
  if (used_inside_loop) {
    if (cost >= ConditionalMoveLimit) return NULL; // Too much goo

    // BlockLayoutByFrequency optimization moves infrequent branch
    // from hot path.
    // No point in CMOV'ing in such case (110 is used instead of 100
    // to allow for the inexactness of the float value).
    if (BlockLayoutByFrequency) {
      infrequent_prob = MAX2(infrequent_prob, (float)BlockLayoutMinDiamondPercentage/110.0f);
    }
  }
  // Check for highly predictable branch.  No point in CMOV'ing if
  // we are going to predict accurately all the time.
  if (iff->_prob < infrequent_prob ||
      iff->_prob > (1.0f - infrequent_prob))
    return NULL;

  // --------------
  // Now replace all Phis with CMOV's
  Node *cmov_ctrl = iff->in(0);
  uint flip = (lp->Opcode() == Op_IfTrue);
  Node_List wq;
  while (1) {
    PhiNode* phi = NULL;
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      Node *out = region->fast_out(i);
      if (out->is_Phi()) {
        phi = out->as_Phi();
        break;
      }
    }
    if (phi == NULL)  break;
#ifndef PRODUCT
    if (PrintOpto && VerifyLoopOptimizations) tty->print_cr("CMOV");
#endif
    // Move speculative ops
    wq.push(phi);
    while (wq.size() > 0) {
      Node *n = wq.pop();
      for (uint j = 1; j < n->req(); j++) {
        Node* m = n->in(j);
        if (m != NULL && !is_dominator(get_ctrl(m), cmov_ctrl)) {
#ifndef PRODUCT
          if (PrintOpto && VerifyLoopOptimizations) {
            tty->print("  speculate: ");
            m->dump();
          }
#endif
          set_ctrl(m, cmov_ctrl);
          wq.push(m);
        }
      }
    }
    Node *cmov = CMoveNode::make( C, cmov_ctrl, iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi) );
    register_new_node( cmov, cmov_ctrl );
    _igvn.replace_node( phi, cmov );
#ifndef PRODUCT
    if (TraceLoopOpts) {
      tty->print("CMOV  ");
      r_loop->dump_head();
      if (Verbose) {
        bol->in(1)->dump(1);
        cmov->dump(1);
      }
    }
    if (VerifyLoopOptimizations) verify();
#endif
  }

  // The useless CFG diamond will fold up later; see the optimization in
  // RegionNode::Ideal.
  _igvn._worklist.push(region);

  return iff->in(1);
}

//------------------------------split_if_with_blocks_pre-----------------------
// Do the real work in a non-recursive function.  Data nodes want to be
// cloned in the pre-order so they can feed each other nicely.
Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
  // Cloning these guys is unlikely to win
  int n_op = n->Opcode();
  if( n_op == Op_MergeMem ) return n;
  if( n->is_Proj() ) return n;
  // Do not clone-up CmpFXXX variations, as these are always
  // followed by a CmpI
  if( n->is_Cmp() ) return n;
  // Attempt to use a conditional move instead of a phi/branch
  if( ConditionalMoveLimit > 0 && n_op == Op_Region ) {
    Node *cmov = conditional_move( n );
    if( cmov ) return cmov;
  }
  if( n->is_CFG() || n->is_LoadStore() )
    return n;
  if( n_op == Op_Opaque1 ||     // Opaque nodes cannot be mod'd
      n_op == Op_Opaque2 ) {
    if( !C->major_progress() )  // If chance of no more loop opts...
      _igvn._worklist.push(n);  // maybe we'll remove them
    return n;
  }

  if( n->is_Con() ) return n;   // No cloning for Con nodes

  Node *n_ctrl = get_ctrl(n);
  if( !n_ctrl ) return n;       // Dead node

  // Attempt to remix address expressions for loop invariants
  Node *m = remix_address_expressions( n );
  if( m ) return m;

  // Determine if the Node has inputs from some local Phi.
  // Returns the block to clone thru.
  Node *n_blk = has_local_phi_input( n );
  if( !n_blk ) return n;
  // Do not clone the trip counter through on a CountedLoop
  // (messes up the canonical shape).
  if( n_blk->is_CountedLoop() && n->Opcode() == Op_AddI ) return n;

  // Check for having no control input; not pinned.  Allow
  // dominating control.
  if( n->in(0) ) {
    Node *dom = idom(n_blk);
    if( dom_lca( n->in(0), dom ) != n->in(0) )
      return n;
  }
  // Policy: when is it profitable.  You must get more wins than
  // policy before it is considered profitable.  Policy is usually 0,
  // so 1 win is considered profitable.  Big merges will require big
  // cloning, so get a larger policy.
  int policy = n_blk->req() >> 2;

  // If the loop is a candidate for range check elimination,
  // delay splitting through its phi until a later loop optimization
  if (n_blk->is_CountedLoop()) {
    IdealLoopTree *lp = get_loop(n_blk);
    if (lp && lp->_rce_candidate) {
      return n;
    }
  }

  // Use same limit as split_if_with_blocks_post
  if( C->unique() > 35000 ) return n; // Method too big

  // Split 'n' through the merge point if it is profitable
  Node *phi = split_thru_phi( n, n_blk, policy );
  if (!phi) return n;

  // Found a Phi to split thru!
  // Replace 'n' with the new phi
  _igvn.replace_node( n, phi );
  // Moved a load around the loop, 'en-registering' something.
  if (n_blk->is_Loop() && n->is_Load() &&
      !phi->in(LoopNode::LoopBackControl)->is_Load())
    C->set_major_progress();

  return phi;
}

static bool merge_point_too_heavy(Compile* C, Node* region) {
  // Bail out if the region and its phis have too many users.
  int weight = 0;
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    weight += region->fast_out(i)->outcnt();
  }
  int nodes_left = C->max_node_limit() - C->live_nodes();
  if (weight * 8 > nodes_left) {
#ifndef PRODUCT
    if (PrintOpto)
      tty->print_cr("*** Split-if bails out:  %d nodes, region weight %d", C->unique(), weight);
#endif
    return true;
  } else {
    return false;
  }
}

static bool merge_point_safe(Node* region) {
  // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
  // having a PhiNode input. This sidesteps the dangerous case where the split
  // ConvI2LNode may become TOP if the input Value() does not
  // overlap the ConvI2L range, leaving a node which may not dominate its
  // uses.
  // A better fix for this problem can be found in the BugTraq entry, but
  // expediency for Mantis demands this hack.
  // 6855164: If the merge point has a FastLockNode with a PhiNode input, we stop
  // split_if_with_blocks from splitting a block because we could not move around
  // the FastLockNode.
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    Node* n = region->fast_out(i);
    if (n->is_Phi()) {
      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
        Node* m = n->fast_out(j);
        if (m->is_FastLock())
          return false;
#ifdef _LP64
        if (m->Opcode() == Op_ConvI2L)
          return false;
        if (m->is_CastII() && m->isa_CastII()->has_range_check()) {
          return false;
        }
#endif
      }
    }
  }
  return true;
}


//------------------------------place_near_use---------------------------------
// Place some computation next to use but not inside inner loops.
// For inner loop uses move it to the preheader area.
Node *PhaseIdealLoop::place_near_use( Node *useblock ) const {
  IdealLoopTree *u_loop = get_loop( useblock );
  return (u_loop->_irreducible || u_loop->_child)
    ? useblock
    : u_loop->_head->in(LoopNode::EntryControl);
}


//------------------------------split_if_with_blocks_post----------------------
// Do the real work in a non-recursive function.  CFG hackery wants to be
// in the post-order, so it can dirty the I-DOM info and not use the dirtied
// info.
void PhaseIdealLoop::split_if_with_blocks_post( Node *n ) {

  // Cloning Cmp through Phi's involves the split-if transform.
  // FastLock is not used by an If
  if( n->is_Cmp() && !n->is_FastLock() ) {
    if( C->unique() > 35000 ) return; // Method too big

    // Do not do 'split-if' if irreducible loops are present.
    if( _has_irreducible_loops )
      return;

    Node *n_ctrl = get_ctrl(n);
    // Determine if the Node has inputs from some local Phi.
    // Returns the block to clone thru.
    Node *n_blk = has_local_phi_input( n );
    if( n_blk != n_ctrl ) return;

    if( merge_point_too_heavy(C, n_ctrl) )
      return;

    if( n->outcnt() != 1 ) return; // Multiple bool's from 1 compare?
    Node *bol = n->unique_out();
    assert( bol->is_Bool(), "expect a bool here" );
    if( bol->outcnt() != 1 ) return;// Multiple branches from 1 compare?
    Node *iff = bol->unique_out();

    // Check some safety conditions
    if( iff->is_If() ) {        // Classic split-if?
      if( iff->in(0) != n_ctrl ) return; // Compare must be in same blk as if
    } else if (iff->is_CMove()) { // Trying to split-up a CMOVE
      // Can't split CMove with different control edge.
      if (iff->in(0) != NULL && iff->in(0) != n_ctrl ) return;
      if( get_ctrl(iff->in(2)) == n_ctrl ||
          get_ctrl(iff->in(3)) == n_ctrl )
        return;                 // Inputs not yet split-up
      if ( get_loop(n_ctrl) != get_loop(get_ctrl(iff)) ) {
        return;                 // Loop-invar test gates loop-varying CMOVE
      }
    } else {
      return;  // some other kind of node, such as an Allocate
    }

    // Do not do 'split-if' if some paths are dead.  First do dead code
    // elimination and then see if it's still profitable.
    for( uint i = 1; i < n_ctrl->req(); i++ )
      if( n_ctrl->in(i) == C->top() )
        return;

    // When is split-if profitable?  Every 'win' means some control flow
    // goes dead, so it's almost always a win.
    int policy = 0;
    // If trying to do a 'Split-If' at the loop head, it is only
    // profitable if the cmp folds up on BOTH paths.  Otherwise we
    // risk peeling a loop forever.

    // CNC - Disabled for now.  Requires careful handling of loop
    // body selection for the cloned code.  Also, make sure we check
    // for any input path not being in the same loop as n_ctrl.  For
    // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
    // because the alternative loop entry points won't be converted
    // into LoopNodes.
    IdealLoopTree *n_loop = get_loop(n_ctrl);
    for( uint j = 1; j < n_ctrl->req(); j++ )
      if( get_loop(n_ctrl->in(j)) != n_loop )
        return;

    // Check for safety of the merge point.
    if( !merge_point_safe(n_ctrl) ) {
      return;
    }

    // Split compare 'n' through the merge point if it is profitable
    Node *phi = split_thru_phi( n, n_ctrl, policy );
    if( !phi ) return;

    // Found a Phi to split thru!
    // Replace 'n' with the new phi
    _igvn.replace_node( n, phi );

    // Now split the bool up thru the phi
    Node *bolphi = split_thru_phi( bol, n_ctrl, -1 );
    guarantee(bolphi != NULL, "null boolean phi node");

    _igvn.replace_node( bol, bolphi );
    assert( iff->in(1) == bolphi, "" );

    if( bolphi->Value(&_igvn)->singleton() )
      return;

    // Conditional-move?  Must split up now
    if( !iff->is_If() ) {
      Node *cmovphi = split_thru_phi( iff, n_ctrl, -1 );
      _igvn.replace_node( iff, cmovphi );
      return;
    }

    // Now split the IF
    do_split_if( iff );
    return;
  }

  // Check for an IF ready to split; one that has its
  // condition codes input coming from a Phi at the block start.
  int n_op = n->Opcode();

  // Check for an IF being dominated by another IF with the same test
  if (n_op == Op_If) {
    Node *bol = n->in(1);
    uint max = bol->outcnt();
    // Check for same test used more than once?
    if (max > 1 && bol->is_Bool()) {
      // Search up IDOMs to see if this IF is dominated.
      Node *cutoff = get_ctrl(bol);

      // Now search up IDOMs till cutoff, looking for a dominating test
      Node *prevdom = n;
      Node *dom = idom(prevdom);
      while (dom != cutoff) {
        if (dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom) {
          // Replace the dominated test with an obvious true or false.
          // Place it on the IGVN worklist for later cleanup.
          C->set_major_progress();
          dominated_by(prevdom, n, false, true);
#ifndef PRODUCT
          if( VerifyLoopOptimizations ) verify();
#endif
          return;
        }
        prevdom = dom;
        dom = idom(prevdom);
      }
    }
  }

  // See if a shared loop-varying computation has no loop-varying uses.
  // Happens if something is only used for JVM state in uncommon trap exits,
  // like various versions of induction variable+offset.  Clone the
  // computation per usage to allow it to sink out of the loop.
  if (has_ctrl(n) && !n->in(0)) {// n not dead and has no control edge (can float about)
    Node *n_ctrl = get_ctrl(n);
    IdealLoopTree *n_loop = get_loop(n_ctrl);
    if( n_loop != _ltree_root ) {
      DUIterator_Fast imax, i = n->fast_outs(imax);
      for (; i < imax; i++) {
        Node* u = n->fast_out(i);
        if( !has_ctrl(u) )     break; // Found control user
        IdealLoopTree *u_loop = get_loop(get_ctrl(u));
        if( u_loop == n_loop ) break; // Found loop-varying use
        if( n_loop->is_member( u_loop ) ) break; // Found use in inner loop
        if( u->Opcode() == Op_Opaque1 ) break; // Found loop limit, bugfix for 4677003
      }
      bool did_break = (i < imax);  // Did we break out of the previous loop?
      if (!did_break && n->outcnt() > 1) { // All uses in outer loops!
        Node *late_load_ctrl = NULL;
        if (n->is_Load()) {
          // If n is a load, get and save the result from get_late_ctrl(),
          // to be later used in calculating the control for n's clones.
          clear_dom_lca_tags();
          late_load_ctrl = get_late_ctrl(n, n_ctrl);
        }
        // If n is a load, and the late control is the same as the current
        // control, then the cloning of n is a pointless exercise, because
        // GVN will ensure that we end up where we started.
        if (!n->is_Load() || late_load_ctrl != n_ctrl) {
          for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; ) {
            Node *u = n->last_out(j); // Clone private computation per use
            _igvn.rehash_node_delayed(u);
            Node *x = n->clone(); // Clone computation
            Node *x_ctrl = NULL;
            if( u->is_Phi() ) {
              // Replace all uses of normal nodes.  Replace Phi uses
              // individually, so the separate Nodes can sink down
              // different paths.
              uint k = 1;
              while( u->in(k) != n ) k++;
              u->set_req( k, x );
              // x goes next to Phi input path
              x_ctrl = u->in(0)->in(k);
              --j;
            } else {              // Normal use
              // Replace all uses
              for( uint k = 0; k < u->req(); k++ ) {
                if( u->in(k) == n ) {
                  u->set_req( k, x );
                  --j;
                }
              }
              x_ctrl = get_ctrl(u);
            }

            // Find control for 'x' next to use but not inside inner loops.
            // For inner loop uses get the preheader area.
            x_ctrl = place_near_use(x_ctrl);

            if (n->is_Load()) {
              // For loads, add a control edge to a CFG node outside of the loop
              // to force them to not combine and return back inside the loop
              // during GVN optimization (4641526).
              //
              // Because we are setting the actual control input, factor in
              // the result from get_late_ctrl() so we respect any
              // anti-dependences. (6233005).
              x_ctrl = dom_lca(late_load_ctrl, x_ctrl);

              // Don't allow the control input to be a CFG splitting node.
              // Such nodes should only have ProjNodes as outs, e.g. IfNode
              // should only have IfTrueNode and IfFalseNode (4985384).
              x_ctrl = find_non_split_ctrl(x_ctrl);
              assert(dom_depth(n_ctrl) <= dom_depth(x_ctrl), "n is later than its clone");

              x->set_req(0, x_ctrl);
            }
            register_new_node(x, x_ctrl);

            // Some institutional knowledge is needed here: 'x' is
            // yanked because if the optimizer runs GVN on it all the
            // cloned x's will common up and undo this optimization and
            // be forced back in the loop.  This is annoying because it
            // makes +VerifyOpto report false-positives on progress.  I
            // tried setting control edges on the x's to force them to
            // not combine, but the matching gets worried when it tries
            // to fold a StoreP and an AddP together (as part of an
            // address expression) and the AddP and StoreP have
            // different controls.
            if (!x->is_Load() && !x->is_DecodeNarrowPtr()) _igvn._worklist.yank(x);
          }
          _igvn.remove_dead_node(n);
        }
      }
    }
  }

  // Check for Opaque2's whose loop has disappeared - whose input is in the
  // same loop nest as their output.  Remove 'em, they are no longer useful.
  if( n_op == Op_Opaque2 &&
      n->in(1) != NULL &&
      get_loop(get_ctrl(n)) == get_loop(get_ctrl(n->in(1))) ) {
    _igvn.replace_node( n, n->in(1) );
  }
}

//------------------------------split_if_with_blocks---------------------------
// Check for aggressive application of 'split-if' optimization,
// using basic block level info.
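// The graph is walked once, non-recursively, using the explicit Node_Stack:
// split_if_with_blocks_pre runs when a node is first reached, and
// split_if_with_blocks_post runs once all of its uses have been visited.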
void PhaseIdealLoop::split_if_with_blocks( VectorSet &visited, Node_Stack &nstack ) {
  Node *n = C->root();
  visited.set(n->_idx); // first, mark node as visited
  // Do pre-visit work for root
  n = split_if_with_blocks_pre( n );
  uint cnt = n->outcnt();
  uint i   = 0;
  while (true) {
    // Visit all children
    if (i < cnt) {
      Node* use = n->raw_out(i);
      ++i;
      if (use->outcnt() != 0 && !visited.test_set(use->_idx)) {
        // Now do pre-visit work for this use
        use = split_if_with_blocks_pre( use );
        nstack.push(n, i); // Save parent and next use's index.
        n   = use;         // Process all children of current use.
        cnt = use->outcnt();
        i   = 0;
      }
    }
    else {
      // All of n's children have been processed, complete post-processing.
      if (cnt != 0 && !n->is_Con()) {
        assert(has_node(n), "no dead nodes");
        split_if_with_blocks_post( n );
      }
      if (nstack.is_empty()) {
        // Finished all nodes on stack.
        break;
      }
      // Get saved parent node and next use's index. Visit the rest of uses.
      n   = nstack.node();
      cnt = n->outcnt();
      i   = nstack.index();
      nstack.pop();
    }
  }
}


//=============================================================================
//
//                   C L O N E   A   L O O P   B O D Y
//

//------------------------------clone_iff--------------------------------------
// Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
// "Nearly" because all Nodes have been cloned from the original in the loop,
// but the fall-in edges to the Cmp are different.  Clone bool/Cmp pairs
// through the Phi recursively, and return a Bool.
BoolNode *PhaseIdealLoop::clone_iff( PhiNode *phi, IdealLoopTree *loop ) {

  // Convert this Phi into a Phi merging Bools
  uint i;
  for( i = 1; i < phi->req(); i++ ) {
    Node *b = phi->in(i);
    if( b->is_Phi() ) {
      _igvn.replace_input_of(phi, i, clone_iff( b->as_Phi(), loop ));
    } else {
      assert( b->is_Bool(), "" );
    }
  }

  Node *sample_bool = phi->in(1);
  Node *sample_cmp  = sample_bool->in(1);

  // Make Phis to merge the Cmp's inputs.
  PhiNode *phi1 = new (C) PhiNode( phi->in(0), Type::TOP );
  PhiNode *phi2 = new (C) PhiNode( phi->in(0), Type::TOP );
  for( i = 1; i < phi->req(); i++ ) {
    Node *n1 = phi->in(i)->in(1)->in(1);
    Node *n2 = phi->in(i)->in(1)->in(2);
    phi1->set_req( i, n1 );
    phi2->set_req( i, n2 );
    phi1->set_type( phi1->type()->meet_speculative(n1->bottom_type()));
    phi2->set_type( phi2->type()->meet_speculative(n2->bottom_type()));
  }
  // See if these Phis have been made before.
  // Register with optimizer
  Node *hit1 = _igvn.hash_find_insert(phi1);
  if( hit1 ) {                  // Hit, toss just made Phi
    _igvn.remove_dead_node(phi1); // Remove new phi
    assert( hit1->is_Phi(), "" );
    phi1 = (PhiNode*)hit1;      // Use existing phi
  } else {                      // Miss
    _igvn.register_new_node_with_optimizer(phi1);
  }
  Node *hit2 = _igvn.hash_find_insert(phi2);
  if( hit2 ) {                  // Hit, toss just made Phi
    _igvn.remove_dead_node(phi2); // Remove new phi
    assert( hit2->is_Phi(), "" );
    phi2 = (PhiNode*)hit2;      // Use existing phi
  } else {                      // Miss
    _igvn.register_new_node_with_optimizer(phi2);
  }
  // Register Phis with loop/block info
  set_ctrl(phi1, phi->in(0));
  set_ctrl(phi2, phi->in(0));
  // Make a new Cmp
  Node *cmp = sample_cmp->clone();
  cmp->set_req( 1, phi1 );
  cmp->set_req( 2, phi2 );
  _igvn.register_new_node_with_optimizer(cmp);
  set_ctrl(cmp, phi->in(0));

  // Make a new Bool
  Node *b = sample_bool->clone();
  b->set_req(1,cmp);
  _igvn.register_new_node_with_optimizer(b);
  set_ctrl(b, phi->in(0));

  assert( b->is_Bool(), "" );
  return (BoolNode*)b;
}

//------------------------------clone_bool-------------------------------------
// Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
// "Nearly" because all Nodes have been cloned from the original in the loop,
// but the fall-in edges to the Cmp are different.  Clone Cmp pairs
// through the Phi recursively, and return a Cmp.
CmpNode *PhaseIdealLoop::clone_bool( PhiNode *phi, IdealLoopTree *loop ) {
  uint i;
  // Convert this Phi into a Phi merging Bools
  for( i = 1; i < phi->req(); i++ ) {
    Node *b = phi->in(i);
    if( b->is_Phi() ) {
      _igvn.replace_input_of(phi, i, clone_bool( b->as_Phi(), loop ));
    } else {
      assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" );
    }
  }

  Node *sample_cmp = phi->in(1);

  // Make Phis to merge the Cmp's inputs.
  PhiNode *phi1 = new (C) PhiNode( phi->in(0), Type::TOP );
  PhiNode *phi2 = new (C) PhiNode( phi->in(0), Type::TOP );
  for( uint j = 1; j < phi->req(); j++ ) {
    Node *cmp_top = phi->in(j); // Inputs are all Cmp or TOP
    Node *n1, *n2;
    if( cmp_top->is_Cmp() ) {
      n1 = cmp_top->in(1);
      n2 = cmp_top->in(2);
    } else {
      n1 = n2 = cmp_top;
    }
    phi1->set_req( j, n1 );
    phi2->set_req( j, n2 );
    phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
    phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
  }

  // See if these Phis have been made before.
  // Register with optimizer
  Node *hit1 = _igvn.hash_find_insert(phi1);
  if( hit1 ) {                  // Hit, toss just made Phi
    _igvn.remove_dead_node(phi1); // Remove new phi
    assert( hit1->is_Phi(), "" );
    phi1 = (PhiNode*)hit1;      // Use existing phi
  } else {                      // Miss
    _igvn.register_new_node_with_optimizer(phi1);
  }
  Node *hit2 = _igvn.hash_find_insert(phi2);
  if( hit2 ) {                  // Hit, toss just made Phi
    _igvn.remove_dead_node(phi2); // Remove new phi
    assert( hit2->is_Phi(), "" );
    phi2 = (PhiNode*)hit2;      // Use existing phi
  } else {                      // Miss
    _igvn.register_new_node_with_optimizer(phi2);
  }
  // Register Phis with loop/block info
  set_ctrl(phi1, phi->in(0));
  set_ctrl(phi2, phi->in(0));
  // Make a new Cmp
  Node *cmp = sample_cmp->clone();
  cmp->set_req( 1, phi1 );
  cmp->set_req( 2, phi2 );
  _igvn.register_new_node_with_optimizer(cmp);
  set_ctrl(cmp, phi->in(0));

  assert( cmp->is_Cmp(), "" );
  return (CmpNode*)cmp;
}

//------------------------------sink_use---------------------------------------
// If 'use' was in the loop-exit block, it now needs to be sunk
// below the post-loop merge point.
void PhaseIdealLoop::sink_use( Node *use, Node *post_loop ) {
  if (!use->is_CFG() && get_ctrl(use) == post_loop->in(2)) {
    set_ctrl(use, post_loop);
    for (DUIterator j = use->outs(); use->has_out(j); j++)
      sink_use(use->out(j), post_loop);
  }
}

//------------------------------clone_loop-------------------------------------
//
//                   C L O N E   A   L O O P   B O D Y
//
// This is the basic building block of the loop optimizations.  It clones an
// entire loop body.  It makes an old_new loop body mapping; with this mapping
// you can find the new-loop equivalent to an old-loop node.  All new-loop
// nodes are exactly equal to their old-loop counterparts, all edges are the
// same.  All exits from the old-loop now have a RegionNode that merges the
// equivalent new-loop path.  This is true even for the normal "loop-exit"
// condition.  All uses of loop-invariant old-loop values now come from (one
// or more) Phis that merge their new-loop equivalents.
//
// This operation leaves the graph in an illegal state: there are two valid
// control edges coming from the loop pre-header to both loop bodies.  I'll
// definitely have to hack the graph after running this transform.
//
// From this building block I will further edit edges to perform loop peeling
// or loop unrolling or iteration splitting (Range-Check-Elimination), etc.
//
// Parameter side_by_side_idom:
//   When side_by_side_idom is NULL, the dominator tree is constructed for
//      the clone loop to dominate the original.  Used in construction of
//      pre-main-post loop sequence.
//   When nonnull, the clone and original are side-by-side, both are
//      dominated by the side_by_side_idom node.  Used in construction of
//      unswitched loops.
void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd,
                                 Node* side_by_side_idom) {

  // Step 1: Clone the loop body.  Make the old->new mapping.
  uint i;
  for( i = 0; i < loop->_body.size(); i++ ) {
    Node *old = loop->_body.at(i);
    Node *nnn = old->clone();
    old_new.map( old->_idx, nnn );
    _igvn.register_new_node_with_optimizer(nnn);
  }


  // Step 2: Fix the edges in the new body.  If the old input is outside the
  // loop use it.  If the old input is INside the loop, use the corresponding
  // new node instead.
  for( i = 0; i < loop->_body.size(); i++ ) {
    Node *old = loop->_body.at(i);
    Node *nnn = old_new[old->_idx];
    // Fix CFG/Loop controlling the new node
    if (has_ctrl(old)) {
      set_ctrl(nnn, old_new[get_ctrl(old)->_idx]);
    } else {
      set_loop(nnn, loop->_parent);
      if (old->outcnt() > 0) {
        set_idom( nnn, old_new[idom(old)->_idx], dd );
      }
    }
    // Correct edges to the new node
    for( uint j = 0; j < nnn->req(); j++ ) {
      Node *n = nnn->in(j);
      if( n ) {
        IdealLoopTree *old_in_loop = get_loop( has_ctrl(n) ? get_ctrl(n) : n );
        if( loop->is_member( old_in_loop ) )
          nnn->set_req(j, old_new[n->_idx]);
      }
    }
    _igvn.hash_find_insert(nnn);
  }
  Node *newhead = old_new[loop->_head->_idx];
  set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);


  // Step 3: Now fix control uses.  Loop varying control uses have already
  // been fixed up (as part of all input edges in Step 2).  Loop invariant
  // control uses must be either an IfFalse or an IfTrue.  Make a merge
  // point to merge the old and new IfFalse/IfTrue nodes; make the use
  // refer to this.
  ResourceArea *area = Thread::current()->resource_area();
  Node_List worklist(area);
  uint new_counter = C->unique();
  for( i = 0; i < loop->_body.size(); i++ ) {
    Node* old = loop->_body.at(i);
    if( !old->is_CFG() ) continue;
    Node* nnn = old_new[old->_idx];

    // Copy uses to a worklist, so I can munge the def-use info
    // with impunity.
    for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
      worklist.push(old->fast_out(j));

    while( worklist.size() ) {  // Visit all uses
      Node *use = worklist.pop();
      if (!has_node(use))  continue; // Ignore dead nodes
      IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
      if( !loop->is_member( use_loop ) && use->is_CFG() ) {
        // Both OLD and USE are CFG nodes here.
        assert( use->is_Proj(), "" );

        // Clone the loop exit control projection
        Node *newuse = use->clone();
        newuse->set_req(0,nnn);
        _igvn.register_new_node_with_optimizer(newuse);
        set_loop(newuse, use_loop);
        set_idom(newuse, nnn, dom_depth(nnn) + 1 );

        // We need a Region to merge the exit from the peeled body and the
        // exit from the old loop body.
        RegionNode *r = new (C) RegionNode(3);
        // Map the old use to the new merge point
        old_new.map( use->_idx, r );
        uint dd_r = MIN2(dom_depth(newuse),dom_depth(use));
        assert( dd_r >= dom_depth(dom_lca(newuse,use)), "" );

        // The original user of 'use' uses 'r' instead.
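        // Walk the out-edges with a reverse iterator so edges can be removed
        // from under the iteration as each use is rewired to 'r'.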
        for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) {
          Node* useuse = use->last_out(l);
          _igvn.rehash_node_delayed(useuse);
          uint uses_found = 0;
          if( useuse->in(0) == use ) {
            useuse->set_req(0, r);
            uses_found++;
            if( useuse->is_CFG() ) {
              assert( dom_depth(useuse) > dd_r, "" );
              set_idom(useuse, r, dom_depth(useuse));
            }
          }
          for( uint k = 1; k < useuse->req(); k++ ) {
            if( useuse->in(k) == use ) {
              useuse->set_req(k, r);
              uses_found++;
            }
          }
          l -= uses_found;      // we deleted 1 or more copies of this edge
        }

        // Now finish up 'r'
        r->set_req( 1, newuse );
        r->set_req( 2,    use );
        _igvn.register_new_node_with_optimizer(r);
        set_loop(r, use_loop);
        set_idom(r, !side_by_side_idom ? newuse->in(0) : side_by_side_idom, dd_r);
      } // End of if a loop-exit test
    }
  }

  // Step 4: If loop-invariant use is not control, it must be dominated by a
  // loop exit IfFalse/IfTrue.  Find "proper" loop exit.  Make a Region
  // there if needed.  Make a Phi there merging old and new used values.
  Node_List *split_if_set = NULL;
  Node_List *split_bool_set = NULL;
  Node_List *split_cex_set = NULL;
  for( i = 0; i < loop->_body.size(); i++ ) {
    Node* old = loop->_body.at(i);
    Node* nnn = old_new[old->_idx];
    // Copy uses to a worklist, so I can munge the def-use info
    // with impunity.
    for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
      worklist.push(old->fast_out(j));

    while( worklist.size() ) {
      Node *use = worklist.pop();
      if (!has_node(use))  continue; // Ignore dead nodes
      if (use->in(0) == C->top())  continue;
      IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
      // Check for data-use outside of loop - at least one of OLD or USE
      // must not be a CFG node.
      if( !loop->is_member( use_loop ) && (!old->is_CFG() || !use->is_CFG())) {

        // If the Data use is an IF, that means we have an IF outside of the
        // loop that is switching on a condition that is set inside of the
        // loop.  Happens if people set a loop-exit flag; then test the flag
        // in the loop to break the loop, then test it again outside of the
        // loop to determine which way the loop exited.
        // Loop predicate If node connects to Bool node through Opaque1 node.
        if (use->is_If() || use->is_CMove() || C->is_predicate_opaq(use)) {
          // Since this code is highly unlikely, we lazily build the worklist
          // of such Nodes to go split.
          if( !split_if_set )
            split_if_set = new Node_List(area);
          split_if_set->push(use);
        }
        if( use->is_Bool() ) {
          if( !split_bool_set )
            split_bool_set = new Node_List(area);
          split_bool_set->push(use);
        }
        if( use->Opcode() == Op_CreateEx ) {
          if( !split_cex_set )
            split_cex_set = new Node_List(area);
          split_cex_set->push(use);
        }


        // Get "block" use is in
        uint idx = 0;
        while( use->in(idx) != old ) idx++;
        Node *prev = use->is_CFG() ? use : get_ctrl(use);
        assert( !loop->is_member( get_loop( prev ) ), "" );
        Node *cfg = prev->_idx >= new_counter
          ? prev->in(2)
          : idom(prev);
        if( use->is_Phi() )     // Phi use is in prior block
          cfg = prev->in(idx);  // NOT in block of Phi itself
        if (cfg->is_top()) {    // Use is dead?
          _igvn.replace_input_of(use, idx, C->top());
          continue;
        }

        while( !loop->is_member( get_loop( cfg ) ) ) {
          prev = cfg;
          cfg = cfg->_idx >= new_counter ? cfg->in(2) : idom(cfg);
        }
        // If the use occurs after merging several exits from the loop, then
        // old value must have dominated all those exits.  Since the same old
        // value was used on all those exits we did not need a Phi at this
        // merge point.  NOW we do need a Phi here.  Each loop exit value
        // is now merged with the peeled body exit; each exit gets its own
        // private Phi and those Phis need to be merged here.
        Node *phi;
        if( prev->is_Region() ) {
          if( idx == 0 ) {      // Updating control edge?
            phi = prev;         // Just use existing control
          } else {              // Else need a new Phi
            phi = PhiNode::make( prev, old );
            // Now recursively fix up the new uses of old!
            for( uint i = 1; i < prev->req(); i++ ) {
              worklist.push(phi); // Onto worklist once for each 'old' input
            }
          }
        } else {
          // Get new RegionNode merging old and new loop exits
          prev = old_new[prev->_idx];
          assert( prev, "just made this in step 7" );
          if( idx == 0 ) {      // Updating control edge?
            phi = prev;         // Just use existing control
          } else {              // Else need a new Phi
            // Make a new Phi merging data values properly
            phi = PhiNode::make( prev, old );
            phi->set_req( 1, nnn );
          }
        }
        // If inserting a new Phi, check for prior hits
        if( idx != 0 ) {
          Node *hit = _igvn.hash_find_insert(phi);
          if( hit == NULL ) {
            _igvn.register_new_node_with_optimizer(phi); // Register new phi
          } else {                                       // or
            // Remove the new phi from the graph and use the hit
            _igvn.remove_dead_node(phi);
            phi = hit;                                   // Use existing phi
          }
          set_ctrl(phi, prev);
        }
        // Make 'use' use the Phi instead of the old loop body exit value
        _igvn.replace_input_of(use, idx, phi);
        if( use->_idx >= new_counter ) { // If updating new phis
          // Not needed for correctness, but prevents a weak assert
          // in AddPNode from tripping (when we end up with different
          // base & derived Phis that will become the same after
          // IGVN does CSE).
          Node *hit = _igvn.hash_find_insert(use);
          if( hit )             // Go ahead and re-hash for hits.
            _igvn.replace_node( use, hit );
        }

        // If 'use' was in the loop-exit block, it now needs to be sunk
        // below the post-loop merge point.
        sink_use( use, prev );
      }
    }
  }

  // Check for IFs that need splitting/cloning.  Happens if an IF outside of
  // the loop uses a condition set in the loop.  The original IF probably
  // takes control from one or more OLD Regions (which in turn get from NEW
  // Regions).  In any case, there will be a set of Phis for each merge point
  // from the IF up to where the original BOOL def exits the loop.
1530 if( split_if_set ) { 1531 while( split_if_set->size() ) { 1532 Node *iff = split_if_set->pop(); 1533 if( iff->in(1)->is_Phi() ) { 1534 BoolNode *b = clone_iff( iff->in(1)->as_Phi(), loop ); 1535 _igvn.replace_input_of(iff, 1, b); 1536 } 1537 } 1538 } 1539 if( split_bool_set ) { 1540 while( split_bool_set->size() ) { 1541 Node *b = split_bool_set->pop(); 1542 Node *phi = b->in(1); 1543 assert( phi->is_Phi(), "" ); 1544 CmpNode *cmp = clone_bool( (PhiNode*)phi, loop ); 1545 _igvn.replace_input_of(b, 1, cmp); 1546 } 1547 } 1548 if( split_cex_set ) { 1549 while( split_cex_set->size() ) { 1550 Node *b = split_cex_set->pop(); 1551 assert( b->in(0)->is_Region(), "" ); 1552 assert( b->in(1)->is_Phi(), "" ); 1553 assert( b->in(0)->in(0) == b->in(1)->in(0), "" ); 1554 split_up( b, b->in(0), NULL ); 1555 } 1556 } 1557 1558 } 1559 1560 1561 //---------------------- stride_of_possible_iv ------------------------------------- 1562 // Looks for an iff/bool/comp with one operand of the compare 1563 // being a cycle involving an add and a phi, 1564 // with an optional truncation (left-shift followed by a right-shift) 1565 // of the add. Returns zero if not an iv. 1566 int PhaseIdealLoop::stride_of_possible_iv(Node* iff) { 1567 Node* trunc1 = NULL; 1568 Node* trunc2 = NULL; 1569 const TypeInt* ttype = NULL; 1570 if (!iff->is_If() || iff->in(1) == NULL || !iff->in(1)->is_Bool()) { 1571 return 0; 1572 } 1573 BoolNode* bl = iff->in(1)->as_Bool(); 1574 Node* cmp = bl->in(1); 1575 if (!cmp || cmp->Opcode() != Op_CmpI && cmp->Opcode() != Op_CmpU) { 1576 return 0; 1577 } 1578 // Must have an invariant operand 1579 if (is_member(get_loop(iff), get_ctrl(cmp->in(2)))) { 1580 return 0; 1581 } 1582 Node* add2 = NULL; 1583 Node* cmp1 = cmp->in(1); 1584 if (cmp1->is_Phi()) { 1585 // (If (Bool (CmpX phi:(Phi ...(Optional-trunc(AddI phi add2))) ))) 1586 Node* phi = cmp1; 1587 for (uint i = 1; i < phi->req(); i++) { 1588 Node* in = phi->in(i); 1589 Node* add = CountedLoopNode::match_incr_with_optional_truncation(in, 1590 &trunc1, &trunc2, &ttype); 1591 if (add && add->in(1) == phi) { 1592 add2 = add->in(2); 1593 break; 1594 } 1595 } 1596 } else { 1597 // (If (Bool (CmpX addtrunc:(Optional-trunc((AddI (Phi ...addtrunc...) add2)) ))) 1598 Node* addtrunc = cmp1; 1599 Node* add = CountedLoopNode::match_incr_with_optional_truncation(addtrunc, 1600 &trunc1, &trunc2, &ttype); 1601 if (add && add->in(1)->is_Phi()) { 1602 Node* phi = add->in(1); 1603 for (uint i = 1; i < phi->req(); i++) { 1604 if (phi->in(i) == addtrunc) { 1605 add2 = add->in(2); 1606 break; 1607 } 1608 } 1609 } 1610 } 1611 if (add2 != NULL) { 1612 const TypeInt* add2t = _igvn.type(add2)->is_int(); 1613 if (add2t->is_con()) { 1614 return add2t->get_con(); 1615 } 1616 } 1617 return 0; 1618 } 1619 1620 1621 //---------------------- stay_in_loop ------------------------------------- 1622 // Return the (unique) control output node that's in the loop (if it exists.) 
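// For example, partial_peel() below calls stay_in_loop() on the candidate
// peel-point If to obtain the projection that stays inside the loop, and then
// calls it again on that projection to find the first node that is not
// peeled; NULL is returned when no single in-loop control use exists.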
1623 Node* PhaseIdealLoop::stay_in_loop( Node* n, IdealLoopTree *loop) { 1624 Node* unique = NULL; 1625 if (!n) return NULL; 1626 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1627 Node* use = n->fast_out(i); 1628 if (!has_ctrl(use) && loop->is_member(get_loop(use))) { 1629 if (unique != NULL) { 1630 return NULL; 1631 } 1632 unique = use; 1633 } 1634 } 1635 return unique; 1636 } 1637 1638 //------------------------------ register_node ------------------------------------- 1639 // Utility to register node "n" with PhaseIdealLoop 1640 void PhaseIdealLoop::register_node(Node* n, IdealLoopTree *loop, Node* pred, int ddepth) { 1641 _igvn.register_new_node_with_optimizer(n); 1642 loop->_body.push(n); 1643 if (n->is_CFG()) { 1644 set_loop(n, loop); 1645 set_idom(n, pred, ddepth); 1646 } else { 1647 set_ctrl(n, pred); 1648 } 1649 } 1650 1651 //------------------------------ proj_clone ------------------------------------- 1652 // Utility to create an if-projection 1653 ProjNode* PhaseIdealLoop::proj_clone(ProjNode* p, IfNode* iff) { 1654 ProjNode* c = p->clone()->as_Proj(); 1655 c->set_req(0, iff); 1656 return c; 1657 } 1658 1659 //------------------------------ short_circuit_if ------------------------------------- 1660 // Force the iff control output to be the live_proj 1661 Node* PhaseIdealLoop::short_circuit_if(IfNode* iff, ProjNode* live_proj) { 1662 guarantee(live_proj != NULL, "null projection"); 1663 int proj_con = live_proj->_con; 1664 assert(proj_con == 0 || proj_con == 1, "false or true projection"); 1665 Node *con = _igvn.intcon(proj_con); 1666 set_ctrl(con, C->root()); 1667 if (iff) { 1668 iff->set_req(1, con); 1669 } 1670 return con; 1671 } 1672 1673 //------------------------------ insert_if_before_proj ------------------------------------- 1674 // Insert a new if before an if projection (* - new node) 1675 // 1676 // before 1677 // if(test) 1678 // / \ 1679 // v v 1680 // other-proj proj (arg) 1681 // 1682 // after 1683 // if(test) 1684 // / \ 1685 // / v 1686 // | * proj-clone 1687 // v | 1688 // other-proj v 1689 // * new_if(relop(cmp[IU](left,right))) 1690 // / \ 1691 // v v 1692 // * new-proj proj 1693 // (returned) 1694 // 1695 ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj) { 1696 IfNode* iff = proj->in(0)->as_If(); 1697 IdealLoopTree *loop = get_loop(proj); 1698 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj(); 1699 int ddepth = dom_depth(proj); 1700 1701 _igvn.rehash_node_delayed(iff); 1702 _igvn.rehash_node_delayed(proj); 1703 1704 proj->set_req(0, NULL); // temporary disconnect 1705 ProjNode* proj2 = proj_clone(proj, iff); 1706 register_node(proj2, loop, iff, ddepth); 1707 1708 Node* cmp = Signed ? 
                     (Node*) new (C)CmpINode(left, right) : (Node*) new (C)CmpUNode(left, right);
  register_node(cmp, loop, proj2, ddepth);

  BoolNode* bol = new (C)BoolNode(cmp, relop);
  register_node(bol, loop, proj2, ddepth);

  IfNode* new_if = new (C)IfNode(proj2, bol, iff->_prob, iff->_fcnt);
  register_node(new_if, loop, proj2, ddepth);

  proj->set_req(0, new_if); // reattach
  set_idom(proj, new_if, ddepth);

  ProjNode* new_exit = proj_clone(other_proj, new_if)->as_Proj();
  guarantee(new_exit != NULL, "null exit node");
  register_node(new_exit, get_loop(other_proj), new_if, ddepth);

  return new_exit;
}

//------------------------------ insert_region_before_proj -------------------------------------
// Insert a region before an if projection (* - new node)
//
// before
//           if(test)
//          /      |
//         v       |
//       proj      v
//             other-proj
//
// after
//           if(test)
//          /      |
//         v       |
// * proj-clone    v
//         |   other-proj
//         v
// * new-region
//         |
//         v
// * dum_if
//        /  \
//       v    \
// * dum-proj  v
//            proj
//
RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) {
  IfNode* iff = proj->in(0)->as_If();
  IdealLoopTree *loop = get_loop(proj);
  ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
  int ddepth = dom_depth(proj);

  _igvn.rehash_node_delayed(iff);
  _igvn.rehash_node_delayed(proj);

  proj->set_req(0, NULL);  // temporary disconnect
  ProjNode* proj2 = proj_clone(proj, iff);
  register_node(proj2, loop, iff, ddepth);

  RegionNode* reg = new (C)RegionNode(2);
  reg->set_req(1, proj2);
  register_node(reg, loop, iff, ddepth);

  IfNode* dum_if = new (C)IfNode(reg, short_circuit_if(NULL, proj), iff->_prob, iff->_fcnt);
  register_node(dum_if, loop, reg, ddepth);

  proj->set_req(0, dum_if); // reattach
  set_idom(proj, dum_if, ddepth);

  ProjNode* dum_proj = proj_clone(other_proj, dum_if);
  register_node(dum_proj, loop, dum_if, ddepth);

  return reg;
}

//------------------------------ insert_cmpi_loop_exit -------------------------------------
// Clone a signed compare loop exit from an unsigned compare and
// insert it before the unsigned cmp on the stay-in-loop path.
// All new nodes are inserted in the dominator tree between the original
// if and its projections. The original if test is replaced with
// a constant to force the stay-in-loop path.
//
// This is done to make sure that the original if and its projections
// still dominate the same set of control nodes, that the ctrl() relation
// from data nodes to them is preserved, and that their loop nesting is
// preserved.
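// As a concrete illustration: the only shape handled is an exit test of the
// form 'i <u limit'. For a positive stride the inserted signed test is
// 'i < limit'; for a negative stride the limit operand is replaced by the
// constant zero and the signed test becomes 'i >= 0' (see the stride handling
// in insert_cmpi_loop_exit below).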
1793 // 1794 // before 1795 // if(i <u limit) unsigned compare loop exit 1796 // / | 1797 // v v 1798 // exit-proj stay-in-loop-proj 1799 // 1800 // after 1801 // if(stay-in-loop-const) original if 1802 // / | 1803 // / v 1804 // / if(i < limit) new signed test 1805 // / / | 1806 // / / v 1807 // / / if(i <u limit) new cloned unsigned test 1808 // / / / | 1809 // v v v | 1810 // region | 1811 // | | 1812 // dum-if | 1813 // / | | 1814 // ether | | 1815 // v v 1816 // exit-proj stay-in-loop-proj 1817 // 1818 IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop) { 1819 const bool Signed = true; 1820 const bool Unsigned = false; 1821 1822 BoolNode* bol = if_cmpu->in(1)->as_Bool(); 1823 if (bol->_test._test != BoolTest::lt) return NULL; 1824 CmpNode* cmpu = bol->in(1)->as_Cmp(); 1825 if (cmpu->Opcode() != Op_CmpU) return NULL; 1826 int stride = stride_of_possible_iv(if_cmpu); 1827 if (stride == 0) return NULL; 1828 1829 Node* lp_proj = stay_in_loop(if_cmpu, loop); 1830 guarantee(lp_proj != NULL, "null loop node"); 1831 1832 ProjNode* lp_continue = lp_proj->as_Proj(); 1833 ProjNode* lp_exit = if_cmpu->proj_out(!lp_continue->is_IfTrue())->as_Proj(); 1834 1835 Node* limit = NULL; 1836 if (stride > 0) { 1837 limit = cmpu->in(2); 1838 } else { 1839 limit = _igvn.makecon(TypeInt::ZERO); 1840 set_ctrl(limit, C->root()); 1841 } 1842 // Create a new region on the exit path 1843 RegionNode* reg = insert_region_before_proj(lp_exit); 1844 guarantee(reg != NULL, "null region node"); 1845 1846 // Clone the if-cmpu-true-false using a signed compare 1847 BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge; 1848 ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, limit, lp_continue); 1849 reg->add_req(cmpi_exit); 1850 1851 // Clone the if-cmpu-true-false 1852 BoolTest::mask rel_u = bol->_test._test; 1853 ProjNode* cmpu_exit = insert_if_before_proj(cmpu->in(1), Unsigned, rel_u, cmpu->in(2), lp_continue); 1854 reg->add_req(cmpu_exit); 1855 1856 // Force original if to stay in loop. 1857 short_circuit_if(if_cmpu, lp_continue); 1858 1859 return cmpi_exit->in(0)->as_If(); 1860 } 1861 1862 //------------------------------ remove_cmpi_loop_exit ------------------------------------- 1863 // Remove a previously inserted signed compare loop exit. 1864 void PhaseIdealLoop::remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop) { 1865 Node* lp_proj = stay_in_loop(if_cmp, loop); 1866 assert(if_cmp->in(1)->in(1)->Opcode() == Op_CmpI && 1867 stay_in_loop(lp_proj, loop)->is_If() && 1868 stay_in_loop(lp_proj, loop)->in(1)->in(1)->Opcode() == Op_CmpU, "inserted cmpi before cmpu"); 1869 Node *con = _igvn.makecon(lp_proj->is_IfTrue() ? TypeInt::ONE : TypeInt::ZERO); 1870 set_ctrl(con, C->root()); 1871 if_cmp->set_req(1, con); 1872 } 1873 1874 //------------------------------ scheduled_nodelist ------------------------------------- 1875 // Create a post order schedule of nodes that are in the 1876 // "member" set. The list is returned in "sched". 1877 // The first node in "sched" is the loop head, followed by 1878 // nodes which have no inputs in the "member" set, and then 1879 // followed by the nodes that have an immediate input dependence 1880 // on a node in "sched". 
1881 void PhaseIdealLoop::scheduled_nodelist( IdealLoopTree *loop, VectorSet& member, Node_List &sched ) { 1882 1883 assert(member.test(loop->_head->_idx), "loop head must be in member set"); 1884 Arena *a = Thread::current()->resource_area(); 1885 VectorSet visited(a); 1886 Node_Stack nstack(a, loop->_body.size()); 1887 1888 Node* n = loop->_head; // top of stack is cached in "n" 1889 uint idx = 0; 1890 visited.set(n->_idx); 1891 1892 // Initially push all with no inputs from within member set 1893 for(uint i = 0; i < loop->_body.size(); i++ ) { 1894 Node *elt = loop->_body.at(i); 1895 if (member.test(elt->_idx)) { 1896 bool found = false; 1897 for (uint j = 0; j < elt->req(); j++) { 1898 Node* def = elt->in(j); 1899 if (def && member.test(def->_idx) && def != elt) { 1900 found = true; 1901 break; 1902 } 1903 } 1904 if (!found && elt != loop->_head) { 1905 nstack.push(n, idx); 1906 n = elt; 1907 assert(!visited.test(n->_idx), "not seen yet"); 1908 visited.set(n->_idx); 1909 } 1910 } 1911 } 1912 1913 // traverse out's that are in the member set 1914 while (true) { 1915 if (idx < n->outcnt()) { 1916 Node* use = n->raw_out(idx); 1917 idx++; 1918 if (!visited.test_set(use->_idx)) { 1919 if (member.test(use->_idx)) { 1920 nstack.push(n, idx); 1921 n = use; 1922 idx = 0; 1923 } 1924 } 1925 } else { 1926 // All outputs processed 1927 sched.push(n); 1928 if (nstack.is_empty()) break; 1929 n = nstack.node(); 1930 idx = nstack.index(); 1931 nstack.pop(); 1932 } 1933 } 1934 } 1935 1936 1937 //------------------------------ has_use_in_set ------------------------------------- 1938 // Has a use in the vector set 1939 bool PhaseIdealLoop::has_use_in_set( Node* n, VectorSet& vset ) { 1940 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 1941 Node* use = n->fast_out(j); 1942 if (vset.test(use->_idx)) { 1943 return true; 1944 } 1945 } 1946 return false; 1947 } 1948 1949 1950 //------------------------------ has_use_internal_to_set ------------------------------------- 1951 // Has use internal to the vector set (ie. not in a phi at the loop head) 1952 bool PhaseIdealLoop::has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop ) { 1953 Node* head = loop->_head; 1954 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 1955 Node* use = n->fast_out(j); 1956 if (vset.test(use->_idx) && !(use->is_Phi() && use->in(0) == head)) { 1957 return true; 1958 } 1959 } 1960 return false; 1961 } 1962 1963 1964 //------------------------------ clone_for_use_outside_loop ------------------------------------- 1965 // clone "n" for uses that are outside of loop 1966 int PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) { 1967 int cloned = 0; 1968 assert(worklist.size() == 0, "should be empty"); 1969 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 1970 Node* use = n->fast_out(j); 1971 if( !loop->is_member(get_loop(has_ctrl(use) ? get_ctrl(use) : use)) ) { 1972 worklist.push(use); 1973 } 1974 } 1975 while( worklist.size() ) { 1976 Node *use = worklist.pop(); 1977 if (!has_node(use) || use->in(0) == C->top()) continue; 1978 uint j; 1979 for (j = 0; j < use->req(); j++) { 1980 if (use->in(j) == n) break; 1981 } 1982 assert(j < use->req(), "must be there"); 1983 1984 // clone "n" and insert it between the inputs of "n" and the use outside the loop 1985 Node* n_clone = n->clone(); 1986 _igvn.replace_input_of(use, j, n_clone); 1987 cloned++; 1988 Node* use_c; 1989 if (!use->is_Phi()) { 1990 use_c = has_ctrl(use) ? 
get_ctrl(use) : use->in(0); 1991 } else { 1992 // Use in a phi is considered a use in the associated predecessor block 1993 use_c = use->in(0)->in(j); 1994 } 1995 set_ctrl(n_clone, use_c); 1996 assert(!loop->is_member(get_loop(use_c)), "should be outside loop"); 1997 get_loop(use_c)->_body.push(n_clone); 1998 _igvn.register_new_node_with_optimizer(n_clone); 1999 #if !defined(PRODUCT) 2000 if (TracePartialPeeling) { 2001 tty->print_cr("loop exit cloning old: %d new: %d newbb: %d", n->_idx, n_clone->_idx, get_ctrl(n_clone)->_idx); 2002 } 2003 #endif 2004 } 2005 return cloned; 2006 } 2007 2008 2009 //------------------------------ clone_for_special_use_inside_loop ------------------------------------- 2010 // clone "n" for special uses that are in the not_peeled region. 2011 // If these def-uses occur in separate blocks, the code generator 2012 // marks the method as not compilable. For example, if a "BoolNode" 2013 // is in a different basic block than the "IfNode" that uses it, then 2014 // the compilation is aborted in the code generator. 2015 void PhaseIdealLoop::clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n, 2016 VectorSet& not_peel, Node_List& sink_list, Node_List& worklist ) { 2017 if (n->is_Phi() || n->is_Load()) { 2018 return; 2019 } 2020 assert(worklist.size() == 0, "should be empty"); 2021 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 2022 Node* use = n->fast_out(j); 2023 if ( not_peel.test(use->_idx) && 2024 (use->is_If() || use->is_CMove() || use->is_Bool()) && 2025 use->in(1) == n) { 2026 worklist.push(use); 2027 } 2028 } 2029 if (worklist.size() > 0) { 2030 // clone "n" and insert it between inputs of "n" and the use 2031 Node* n_clone = n->clone(); 2032 loop->_body.push(n_clone); 2033 _igvn.register_new_node_with_optimizer(n_clone); 2034 set_ctrl(n_clone, get_ctrl(n)); 2035 sink_list.push(n_clone); 2036 not_peel <<= n_clone->_idx; // add n_clone to not_peel set. 
#if !defined(PRODUCT)
    if (TracePartialPeeling) {
      tty->print_cr("special not_peeled cloning old: %d new: %d", n->_idx, n_clone->_idx);
    }
#endif
    while( worklist.size() ) {
      Node *use = worklist.pop();
      _igvn.rehash_node_delayed(use);
      for (uint j = 1; j < use->req(); j++) {
        if (use->in(j) == n) {
          use->set_req(j, n_clone);
        }
      }
    }
  }
}


//------------------------------ insert_phi_for_loop -------------------------------------
// Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp ) {
  Node *phi = PhiNode::make(lp, back_edge_val);
  phi->set_req(LoopNode::EntryControl, lp_entry_val);
  // Use existing phi if it already exists
  Node *hit = _igvn.hash_find_insert(phi);
  if( hit == NULL ) {
    _igvn.register_new_node_with_optimizer(phi);
    set_ctrl(phi, lp);
  } else {
    // Remove the new phi from the graph and use the hit
    _igvn.remove_dead_node(phi);
    phi = hit;
  }
  _igvn.replace_input_of(use, idx, phi);
}

#ifdef ASSERT
//------------------------------ is_valid_loop_partition -------------------------------------
// Validate the loop partition sets: peel and not_peel
bool PhaseIdealLoop::is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list,
                                              VectorSet& not_peel ) {
  uint i;
  // Check that peel_list entries are in the peel set
  for (i = 0; i < peel_list.size(); i++) {
    if (!peel.test(peel_list.at(i)->_idx)) {
      return false;
    }
  }
  // Check that all loop members are in either the peel set or the not_peel set
  for (i = 0; i < loop->_body.size(); i++ ) {
    Node *def = loop->_body.at(i);
    uint di = def->_idx;
    // Check that peel set elements are in peel_list
    if (peel.test(di)) {
      if (not_peel.test(di)) {
        return false;
      }
      // Must be in peel_list also
      bool found = false;
      for (uint j = 0; j < peel_list.size(); j++) {
        if (peel_list.at(j)->_idx == di) {
          found = true;
          break;
        }
      }
      if (!found) {
        return false;
      }
    } else if (not_peel.test(di)) {
      if (peel.test(di)) {
        return false;
      }
    } else {
      return false;
    }
  }
  return true;
}

//------------------------------ is_valid_clone_loop_exit_use -------------------------------------
// Ensure a use outside of the loop is of the right form
bool PhaseIdealLoop::is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx) {
  Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
  return (use->is_Phi() &&
          use_c->is_Region() && use_c->req() == 3 &&
          (use_c->in(exit_idx)->Opcode() == Op_IfTrue ||
           use_c->in(exit_idx)->Opcode() == Op_IfFalse ||
           use_c->in(exit_idx)->Opcode() == Op_JumpProj) &&
          loop->is_member( get_loop( use_c->in(exit_idx)->in(0) ) ) );
}

//------------------------------ is_valid_clone_loop_form -------------------------------------
// Ensure that all uses outside of the loop are of the right form
bool PhaseIdealLoop::is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
                                               uint orig_exit_idx, uint clone_exit_idx) {
  uint len = peel_list.size();
  for (uint i = 0; i < len; i++) {
    Node *def = peel_list.at(i);

    for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
      Node *use = def->fast_out(j);
      Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
      if (!loop->is_member(get_loop(use_c))) {
        // use is not in the loop, check for correct structure
        if (use->in(0) == def) {
          // Okay
        } else if (!is_valid_clone_loop_exit_use(loop, use, orig_exit_idx)) {
          return false;
        }
      }
    }
  }
  return true;
}
#endif

//------------------------------ partial_peel -------------------------------------
// Partially peel (aka loop rotation) the top portion of a loop (called
// the peel section below) by cloning it and placing one copy just before
// the new loop head and the other copy at the bottom of the new loop.
//
// before                       after                   where it came from
//
// stmt1                        stmt1
// loop:                        stmt2                   clone
//   stmt2                      if condA goto exitA     clone
//   if condA goto exitA        new_loop:               new
//   stmt3                      stmt3                   clone
//   if !condB goto loop        if condB goto exitB     clone
// exitB:                       stmt2                   orig
//   stmt4                      if !condA goto new_loop orig
// exitA:                       goto exitA
//                              exitB:
//                                stmt4
//                              exitA:
//
// Step 1: find the cut point: an exit test on probable
//         induction variable.
// Step 2: schedule (with cloning) operations in the peel
//         section that can be executed after the cut into
//         the section that is not peeled. This may need
//         to clone operations into exit blocks. For
//         instance, a reference to A[i] in the not-peel
//         section and a reference to B[i] in an exit block
//         may cause a left-shift of i by 2 to be placed
//         in the peel block. This step will clone the left
//         shift into the exit block and sink the left shift
//         from the peel to the not-peel section.
// Step 3: clone the loop, retarget the control, and insert
//         phis for values that are live across the new loop
//         head. This is very dependent on the graph structure
//         from clone_loop. It creates region nodes for
//         exit control and associated phi nodes for values
//         that flow out of the loop through that exit. The region
//         node is dominated by the clone's control projection.
//         So the clone's peel section is placed before the
//         new loop head, and the clone's not-peel section
//         forms the top part of the new loop. The original
//         peel section forms the tail of the new loop.
// Step 4: update the dominator tree and recompute the
//         dominator depth.
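// For instance, in a loop of the (purely illustrative) source form
//
//   do {
//     sum += a[i];              // stmt2
//     if (i >= limit) break;    // condA: exit test on a probable iv (cut point)
//     i++;                      // stmt3
//   } while (more);             // condB
//
// the 'i >= limit' test is a candidate cut point, and partial peeling rotates
// the loop so that this iv test ends up as the loop-ending test of the new
// loop, as sketched in the graphs below.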
2198 // 2199 // orig 2200 // 2201 // stmt1 2202 // | 2203 // v 2204 // loop predicate 2205 // | 2206 // v 2207 // loop<----+ 2208 // | | 2209 // stmt2 | 2210 // | | 2211 // v | 2212 // ifA | 2213 // / | | 2214 // v v | 2215 // false true ^ <-- last_peel 2216 // / | | 2217 // / ===|==cut | 2218 // / stmt3 | <-- first_not_peel 2219 // / | | 2220 // | v | 2221 // v ifB | 2222 // exitA: / \ | 2223 // / \ | 2224 // v v | 2225 // false true | 2226 // / \ | 2227 // / ----+ 2228 // | 2229 // v 2230 // exitB: 2231 // stmt4 2232 // 2233 // 2234 // after clone loop 2235 // 2236 // stmt1 2237 // | 2238 // v 2239 // loop predicate 2240 // / \ 2241 // clone / \ orig 2242 // / \ 2243 // / \ 2244 // v v 2245 // +---->loop loop<----+ 2246 // | | | | 2247 // | stmt2 stmt2 | 2248 // | | | | 2249 // | v v | 2250 // | ifA ifA | 2251 // | | \ / | | 2252 // | v v v v | 2253 // ^ true false false true ^ <-- last_peel 2254 // | | ^ \ / | | 2255 // | cut==|== \ \ / ===|==cut | 2256 // | stmt3 \ \ / stmt3 | <-- first_not_peel 2257 // | | dom | | | | 2258 // | v \ 1v v2 v | 2259 // | ifB regionA ifB | 2260 // | / \ | / \ | 2261 // | / \ v / \ | 2262 // | v v exitA: v v | 2263 // | true false false true | 2264 // | / ^ \ / \ | 2265 // +---- \ \ / ----+ 2266 // dom \ / 2267 // \ 1v v2 2268 // regionB 2269 // | 2270 // v 2271 // exitB: 2272 // stmt4 2273 // 2274 // 2275 // after partial peel 2276 // 2277 // stmt1 2278 // | 2279 // v 2280 // loop predicate 2281 // / 2282 // clone / orig 2283 // / TOP 2284 // / \ 2285 // v v 2286 // TOP->loop loop----+ 2287 // | | | 2288 // stmt2 stmt2 | 2289 // | | | 2290 // v v | 2291 // ifA ifA | 2292 // | \ / | | 2293 // v v v v | 2294 // true false false true | <-- last_peel 2295 // | ^ \ / +------|---+ 2296 // +->newloop \ \ / === ==cut | | 2297 // | stmt3 \ \ / TOP | | 2298 // | | dom | | stmt3 | | <-- first_not_peel 2299 // | v \ 1v v2 v | | 2300 // | ifB regionA ifB ^ v 2301 // | / \ | / \ | | 2302 // | / \ v / \ | | 2303 // | v v exitA: v v | | 2304 // | true false false true | | 2305 // | / ^ \ / \ | | 2306 // | | \ \ / v | | 2307 // | | dom \ / TOP | | 2308 // | | \ 1v v2 | | 2309 // ^ v regionB | | 2310 // | | | | | 2311 // | | v ^ v 2312 // | | exitB: | | 2313 // | | stmt4 | | 2314 // | +------------>-----------------+ | 2315 // | | 2316 // +-----------------<---------------------+ 2317 // 2318 // 2319 // final graph 2320 // 2321 // stmt1 2322 // | 2323 // v 2324 // loop predicate 2325 // | 2326 // v 2327 // stmt2 clone 2328 // | 2329 // v 2330 // ........> ifA clone 2331 // : / | 2332 // dom / | 2333 // : v v 2334 // : false true 2335 // : | | 2336 // : | v 2337 // : | newloop<-----+ 2338 // : | | | 2339 // : | stmt3 clone | 2340 // : | | | 2341 // : | v | 2342 // : | ifB | 2343 // : | / \ | 2344 // : | v v | 2345 // : | false true | 2346 // : | | | | 2347 // : | v stmt2 | 2348 // : | exitB: | | 2349 // : | stmt4 v | 2350 // : | ifA orig | 2351 // : | / \ | 2352 // : | / \ | 2353 // : | v v | 2354 // : | false true | 2355 // : | / \ | 2356 // : v v -----+ 2357 // RegionA 2358 // | 2359 // v 2360 // exitA 2361 // 2362 bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) { 2363 2364 assert(!loop->_head->is_CountedLoop(), "Non-counted loop only"); 2365 if (!loop->_head->is_Loop()) { 2366 return false; } 2367 2368 LoopNode *head = loop->_head->as_Loop(); 2369 2370 if (head->is_partial_peel_loop() || head->partial_peel_has_failed()) { 2371 return false; 2372 } 2373 2374 // Check for complex exit control 2375 for(uint ii = 0; ii < loop->_body.size(); ii++ 
) { 2376 Node *n = loop->_body.at(ii); 2377 int opc = n->Opcode(); 2378 if (n->is_Call() || 2379 opc == Op_Catch || 2380 opc == Op_CatchProj || 2381 opc == Op_Jump || 2382 opc == Op_JumpProj) { 2383 #if !defined(PRODUCT) 2384 if (TracePartialPeeling) { 2385 tty->print_cr("\nExit control too complex: lp: %d", head->_idx); 2386 } 2387 #endif 2388 return false; 2389 } 2390 } 2391 2392 int dd = dom_depth(head); 2393 2394 // Step 1: find cut point 2395 2396 // Walk up dominators to loop head looking for first loop exit 2397 // which is executed on every path thru loop. 2398 IfNode *peel_if = NULL; 2399 IfNode *peel_if_cmpu = NULL; 2400 2401 Node *iff = loop->tail(); 2402 while( iff != head ) { 2403 if( iff->is_If() ) { 2404 Node *ctrl = get_ctrl(iff->in(1)); 2405 if (ctrl->is_top()) return false; // Dead test on live IF. 2406 // If loop-varying exit-test, check for induction variable 2407 if( loop->is_member(get_loop(ctrl)) && 2408 loop->is_loop_exit(iff) && 2409 is_possible_iv_test(iff)) { 2410 Node* cmp = iff->in(1)->in(1); 2411 if (cmp->Opcode() == Op_CmpI) { 2412 peel_if = iff->as_If(); 2413 } else { 2414 assert(cmp->Opcode() == Op_CmpU, "must be CmpI or CmpU"); 2415 peel_if_cmpu = iff->as_If(); 2416 } 2417 } 2418 } 2419 iff = idom(iff); 2420 } 2421 // Prefer signed compare over unsigned compare. 2422 IfNode* new_peel_if = NULL; 2423 if (peel_if == NULL) { 2424 if (!PartialPeelAtUnsignedTests || peel_if_cmpu == NULL) { 2425 return false; // No peel point found 2426 } 2427 new_peel_if = insert_cmpi_loop_exit(peel_if_cmpu, loop); 2428 if (new_peel_if == NULL) { 2429 return false; // No peel point found 2430 } 2431 peel_if = new_peel_if; 2432 } 2433 Node* last_peel = stay_in_loop(peel_if, loop); 2434 Node* first_not_peeled = stay_in_loop(last_peel, loop); 2435 if (first_not_peeled == NULL || first_not_peeled == head) { 2436 return false; 2437 } 2438 2439 #if !defined(PRODUCT) 2440 if (TraceLoopOpts) { 2441 tty->print("PartialPeel "); 2442 loop->dump_head(); 2443 } 2444 2445 if (TracePartialPeeling) { 2446 tty->print_cr("before partial peel one iteration"); 2447 Node_List wl; 2448 Node* t = head->in(2); 2449 while (true) { 2450 wl.push(t); 2451 if (t == head) break; 2452 t = idom(t); 2453 } 2454 while (wl.size() > 0) { 2455 Node* tt = wl.pop(); 2456 tt->dump(); 2457 if (tt == last_peel) tty->print_cr("-- cut --"); 2458 } 2459 } 2460 #endif 2461 ResourceArea *area = Thread::current()->resource_area(); 2462 VectorSet peel(area); 2463 VectorSet not_peel(area); 2464 Node_List peel_list(area); 2465 Node_List worklist(area); 2466 Node_List sink_list(area); 2467 2468 // Set of cfg nodes to peel are those that are executable from 2469 // the head through last_peel. 2470 assert(worklist.size() == 0, "should be empty"); 2471 worklist.push(head); 2472 peel.set(head->_idx); 2473 while (worklist.size() > 0) { 2474 Node *n = worklist.pop(); 2475 if (n != last_peel) { 2476 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 2477 Node* use = n->fast_out(j); 2478 if (use->is_CFG() && 2479 loop->is_member(get_loop(use)) && 2480 !peel.test_set(use->_idx)) { 2481 worklist.push(use); 2482 } 2483 } 2484 } 2485 } 2486 2487 // Set of non-cfg nodes to peel are those that are control 2488 // dependent on the cfg nodes. 2489 uint i; 2490 for(i = 0; i < loop->_body.size(); i++ ) { 2491 Node *n = loop->_body.at(i); 2492 Node *n_c = has_ctrl(n) ? 
get_ctrl(n) : n; 2493 if (peel.test(n_c->_idx)) { 2494 peel.set(n->_idx); 2495 } else { 2496 not_peel.set(n->_idx); 2497 } 2498 } 2499 2500 // Step 2: move operations from the peeled section down into the 2501 // not-peeled section 2502 2503 // Get a post order schedule of nodes in the peel region 2504 // Result in right-most operand. 2505 scheduled_nodelist(loop, peel, peel_list ); 2506 2507 assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition"); 2508 2509 // For future check for too many new phis 2510 uint old_phi_cnt = 0; 2511 for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) { 2512 Node* use = head->fast_out(j); 2513 if (use->is_Phi()) old_phi_cnt++; 2514 } 2515 2516 #if !defined(PRODUCT) 2517 if (TracePartialPeeling) { 2518 tty->print_cr("\npeeled list"); 2519 } 2520 #endif 2521 2522 // Evacuate nodes in peel region into the not_peeled region if possible 2523 uint new_phi_cnt = 0; 2524 uint cloned_for_outside_use = 0; 2525 for (i = 0; i < peel_list.size();) { 2526 Node* n = peel_list.at(i); 2527 #if !defined(PRODUCT) 2528 if (TracePartialPeeling) n->dump(); 2529 #endif 2530 bool incr = true; 2531 if ( !n->is_CFG() ) { 2532 2533 if ( has_use_in_set(n, not_peel) ) { 2534 2535 // If not used internal to the peeled region, 2536 // move "n" from peeled to not_peeled region. 2537 2538 if ( !has_use_internal_to_set(n, peel, loop) ) { 2539 2540 // if not pinned and not a load (which maybe anti-dependent on a store) 2541 // and not a CMove (Matcher expects only bool->cmove). 2542 if ( n->in(0) == NULL && !n->is_Load() && !n->is_CMove() ) { 2543 cloned_for_outside_use += clone_for_use_outside_loop( loop, n, worklist ); 2544 sink_list.push(n); 2545 peel >>= n->_idx; // delete n from peel set. 2546 not_peel <<= n->_idx; // add n to not_peel set. 2547 peel_list.remove(i); 2548 incr = false; 2549 #if !defined(PRODUCT) 2550 if (TracePartialPeeling) { 2551 tty->print_cr("sink to not_peeled region: %d newbb: %d", 2552 n->_idx, get_ctrl(n)->_idx); 2553 } 2554 #endif 2555 } 2556 } else { 2557 // Otherwise check for special def-use cases that span 2558 // the peel/not_peel boundary such as bool->if 2559 clone_for_special_use_inside_loop( loop, n, not_peel, sink_list, worklist ); 2560 new_phi_cnt++; 2561 } 2562 } 2563 } 2564 if (incr) i++; 2565 } 2566 2567 if (new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta) { 2568 #if !defined(PRODUCT) 2569 if (TracePartialPeeling) { 2570 tty->print_cr("\nToo many new phis: %d old %d new cmpi: %c", 2571 new_phi_cnt, old_phi_cnt, new_peel_if != NULL?'T':'F'); 2572 } 2573 #endif 2574 if (new_peel_if != NULL) { 2575 remove_cmpi_loop_exit(new_peel_if, loop); 2576 } 2577 // Inhibit more partial peeling on this loop 2578 assert(!head->is_partial_peel_loop(), "not partial peeled"); 2579 head->mark_partial_peel_failed(); 2580 if (cloned_for_outside_use > 0) { 2581 // Terminate this round of loop opts because 2582 // the graph outside this loop was changed. 2583 C->set_major_progress(); 2584 return true; 2585 } 2586 return false; 2587 } 2588 2589 // Step 3: clone loop, retarget control, and insert new phis 2590 2591 // Create new loop head for new phis and to hang 2592 // the nodes being moved (sinked) from the peel region. 
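  // Note: both control inputs of new_head start out as last_peel here; the
  // entry and backedge get their final values further down, after clone_loop,
  // when this head's inputs are cut to top and the surviving clone's backedge
  // is pointed at last_peel.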
  LoopNode* new_head = new (C) LoopNode(last_peel, last_peel);
  new_head->set_unswitch_count(head->unswitch_count()); // Preserve
  _igvn.register_new_node_with_optimizer(new_head);
  assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled");
  first_not_peeled->set_req(0, new_head);
  set_loop(new_head, loop);
  loop->_body.push(new_head);
  not_peel.set(new_head->_idx);
  set_idom(new_head, last_peel, dom_depth(first_not_peeled));
  set_idom(first_not_peeled, new_head, dom_depth(first_not_peeled));

  while (sink_list.size() > 0) {
    Node* n = sink_list.pop();
    set_ctrl(n, new_head);
  }

  assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");

  clone_loop( loop, old_new, dd );

  const uint clone_exit_idx = 1;
  const uint orig_exit_idx = 2;
  assert(is_valid_clone_loop_form( loop, peel_list, orig_exit_idx, clone_exit_idx ), "bad clone loop");

  Node* head_clone = old_new[head->_idx];
  LoopNode* new_head_clone = old_new[new_head->_idx]->as_Loop();
  Node* orig_tail_clone = head_clone->in(2);

  // Add phi if "def" node is in peel set and "use" is not

  for(i = 0; i < peel_list.size(); i++ ) {
    Node *def = peel_list.at(i);
    if (!def->is_CFG()) {
      for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
        Node *use = def->fast_out(j);
        if (has_node(use) && use->in(0) != C->top() &&
            (!peel.test(use->_idx) ||
             (use->is_Phi() && use->in(0) == head)) ) {
          worklist.push(use);
        }
      }
      while( worklist.size() ) {
        Node *use = worklist.pop();
        for (uint j = 1; j < use->req(); j++) {
          Node* n = use->in(j);
          if (n == def) {

            // "def" is in peel set, "use" is not in peel set
            // or "use" is in the entry boundary (a phi) of the peel set

            Node* use_c = has_ctrl(use) ? get_ctrl(use) : use;

            if ( loop->is_member(get_loop( use_c )) ) {
              // use is in loop
              if (old_new[use->_idx] != NULL) { // null for dead code
                Node* use_clone = old_new[use->_idx];
                _igvn.replace_input_of(use, j, C->top());
                insert_phi_for_loop( use_clone, j, old_new[def->_idx], def, new_head_clone );
              }
            } else {
              assert(is_valid_clone_loop_exit_use(loop, use, orig_exit_idx), "clone loop format");
              // use is not in the loop, check if the live range includes the cut
              Node* lp_if = use_c->in(orig_exit_idx)->in(0);
              if (not_peel.test(lp_if->_idx)) {
                assert(j == orig_exit_idx, "use from original loop");
                insert_phi_for_loop( use, clone_exit_idx, old_new[def->_idx], def, new_head_clone );
              }
            }
          }
        }
      }
    }
  }

  // Step 3b: retarget control

  // Redirect control to the new loop head if a cloned node in
  // the not_peeled region has control that points into the peeled region.
  // This is necessary because the cloned peeled region will be outside
  // the loop.
2673 // from to 2674 // cloned-peeled <---+ 2675 // new_head_clone: | <--+ 2676 // cloned-not_peeled in(0) in(0) 2677 // orig-peeled 2678 2679 for(i = 0; i < loop->_body.size(); i++ ) { 2680 Node *n = loop->_body.at(i); 2681 if (!n->is_CFG() && n->in(0) != NULL && 2682 not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) { 2683 Node* n_clone = old_new[n->_idx]; 2684 _igvn.replace_input_of(n_clone, 0, new_head_clone); 2685 } 2686 } 2687 2688 // Backedge of the surviving new_head (the clone) is original last_peel 2689 _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel); 2690 2691 // Cut first node in original not_peel set 2692 _igvn.rehash_node_delayed(new_head); // Multiple edge updates: 2693 new_head->set_req(LoopNode::EntryControl, C->top()); // use rehash_node_delayed / set_req instead of 2694 new_head->set_req(LoopNode::LoopBackControl, C->top()); // multiple replace_input_of calls 2695 2696 // Copy head_clone back-branch info to original head 2697 // and remove original head's loop entry and 2698 // clone head's back-branch 2699 _igvn.rehash_node_delayed(head); // Multiple edge updates 2700 head->set_req(LoopNode::EntryControl, head_clone->in(LoopNode::LoopBackControl)); 2701 head->set_req(LoopNode::LoopBackControl, C->top()); 2702 _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top()); 2703 2704 // Similarly modify the phis 2705 for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) { 2706 Node* use = head->fast_out(k); 2707 if (use->is_Phi() && use->outcnt() > 0) { 2708 Node* use_clone = old_new[use->_idx]; 2709 _igvn.rehash_node_delayed(use); // Multiple edge updates 2710 use->set_req(LoopNode::EntryControl, use_clone->in(LoopNode::LoopBackControl)); 2711 use->set_req(LoopNode::LoopBackControl, C->top()); 2712 _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top()); 2713 } 2714 } 2715 2716 // Step 4: update dominator tree and dominator depth 2717 2718 set_idom(head, orig_tail_clone, dd); 2719 recompute_dom_depth(); 2720 2721 // Inhibit more partial peeling on this loop 2722 new_head_clone->set_partial_peel_loop(); 2723 C->set_major_progress(); 2724 loop->record_for_igvn(); 2725 2726 #if !defined(PRODUCT) 2727 if (TracePartialPeeling) { 2728 tty->print_cr("\nafter partial peel one iteration"); 2729 Node_List wl(area); 2730 Node* t = last_peel; 2731 while (true) { 2732 wl.push(t); 2733 if (t == head_clone) break; 2734 t = idom(t); 2735 } 2736 while (wl.size() > 0) { 2737 Node* tt = wl.pop(); 2738 if (tt == head) tty->print_cr("orig head"); 2739 else if (tt == new_head_clone) tty->print_cr("new head"); 2740 else if (tt == head_clone) tty->print_cr("clone head"); 2741 tt->dump(); 2742 } 2743 } 2744 #endif 2745 return true; 2746 } 2747 2748 //------------------------------reorg_offsets---------------------------------- 2749 // Reorganize offset computations to lower register pressure. Mostly 2750 // prevent loop-fallout uses of the pre-incremented trip counter (which are 2751 // then alive with the post-incremented trip counter forcing an extra 2752 // register move) 2753 void PhaseIdealLoop::reorg_offsets(IdealLoopTree *loop) { 2754 // Perform it only for canonical counted loops. 2755 // Loop's shape could be messed up by iteration_split_impl. 
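  // Illustrative note on the problem being fixed: when code after the loop
  // uses the not-yet-incremented trip counter (the Phi), that value and the
  // incremented counter used by the exit test are both live across the exit.
  // The loop below rewrites such fall-out uses to (incr - stride), i.e. to a
  // value recomputed from the post-incremented counter, so that only one
  // version of the trip counter stays live out of the loop.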
  if (!loop->_head->is_CountedLoop())
    return;
  if (!loop->_head->as_Loop()->is_valid_counted_loop())
    return;

  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  CountedLoopEndNode *cle = cl->loopexit();
  Node *exit = cle->proj_out(false);
  Node *phi = cl->phi();

  // Check for the special case of folks using the pre-incremented
  // trip-counter on the fall-out path (forces the pre-incremented
  // and post-incremented trip counter to be live at the same time).
  // Fix this by adjusting to use the post-increment trip counter.

  bool progress = true;
  while (progress) {
    progress = false;
    for (DUIterator_Fast imax, i = phi->fast_outs(imax); i < imax; i++) {
      Node* use = phi->fast_out(i);   // User of trip-counter
      if (!has_ctrl(use))  continue;
      Node *u_ctrl = get_ctrl(use);
      if (use->is_Phi()) {
        u_ctrl = NULL;
        for (uint j = 1; j < use->req(); j++)
          if (use->in(j) == phi)
            u_ctrl = dom_lca(u_ctrl, use->in(0)->in(j));
      }
      IdealLoopTree *u_loop = get_loop(u_ctrl);
      // Look for loop-invariant use
      if (u_loop == loop) continue;
      if (loop->is_member(u_loop)) continue;
      // Check that use is live out the bottom. Assuming the trip-counter
      // update is right at the bottom, uses of the loop middle are ok.
      if (dom_lca(exit, u_ctrl) != exit) continue;
      // Hit!  Refactor use to use the post-incremented tripcounter.
      // Compute a post-increment tripcounter.
      Node *opaq = new (C) Opaque2Node( C, cle->incr() );
      register_new_node(opaq, exit);
      Node *neg_stride = _igvn.intcon(-cle->stride_con());
      set_ctrl(neg_stride, C->root());
      Node *post = new (C) AddINode( opaq, neg_stride);
      register_new_node(post, exit);
      _igvn.rehash_node_delayed(use);
      for (uint j = 1; j < use->req(); j++) {
        if (use->in(j) == phi)
          use->set_req(j, post);
      }
      // Since DU info changed, rerun loop
      progress = true;
      break;
    }
  }

}