/*
 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/matcher.hpp"
#include "opto/mulnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
#include "opto/subtypenode.hpp"
#include "opto/superword.hpp"
#include "opto/vectornode.hpp"
#include "utilities/macros.hpp"

//=============================================================================
//------------------------------split_thru_phi---------------------------------
// Split Node 'n' through merge point if there is enough win.
Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  if ((n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) ||
      (n->Opcode() == Op_ConvL2I && n->bottom_type() != TypeInt::INT)) {
    // ConvI2L/ConvL2I may have type information on it which is unsafe to push up
    // so disable this for now
    return nullptr;
  }

  // Splitting range check CastIIs through a loop induction Phi can
  // cause new Phis to be created that are left unrelated to the loop
  // induction Phi and prevent optimizations (vectorization)
  if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
      n->in(1) == region->as_CountedLoop()->phi()) {
    return nullptr;
  }

  if (cannot_split_division(n, region)) {
    return nullptr;
  }

  int wins = 0;
  assert(!n->is_CFG(), "");
  assert(region->is_Region(), "");

  const Type* type = n->bottom_type();
  const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
  Node* phi;
  if (t_oop != nullptr && t_oop->is_known_instance_field()) {
    int iid    = t_oop->instance_id();
    int index  = C->get_alias_index(t_oop);
    int offset = t_oop->offset();
    phi = new PhiNode(region, type, nullptr, iid, index, offset);
  } else {
    phi = PhiNode::make_blank(region, n);
  }
  uint old_unique = C->unique();
  for (uint i = 1; i < region->req(); i++) {
    Node* x;
    Node* the_clone = nullptr;
    if (region->in(i) == C->top()) {
      x = C->top();             // Dead path? Use a dead data op
    } else {
      x = n->clone();           // Else clone up the data op
      the_clone = x;            // Remember for possible deletion.
      // Alter data node to use pre-phi inputs
      if (n->in(0) == region)
        x->set_req(0, region->in(i));
      for (uint j = 1; j < n->req(); j++) {
        Node* in = n->in(j);
        if (in->is_Phi() && in->in(0) == region)
          x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
      }
    }
    // Check for a 'win' on some paths
    const Type* t = x->Value(&_igvn);

    bool singleton = t->singleton();

    // A TOP singleton indicates that there are no possible values incoming
    // along a particular edge. In most cases, this is OK, and the Phi will
    // be eliminated later in an Ideal call. However, we can't allow this to
    // happen if the singleton occurs on loop entry, as the elimination of
    // the PhiNode may cause the resulting node to migrate back to a previous
    // loop iteration.
    if (singleton && t == Type::TOP) {
      // Is_Loop() == false does not confirm the absence of a loop (e.g., an
      // irreducible loop may not be indicated by an affirmative is_Loop());
      // therefore, the only top we can split thru a phi is on a backedge of
      // a loop.
      singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
    }

    if (singleton) {
      wins++;
      x = makecon(t);
    } else {
      // We now call Identity to try to simplify the cloned node.
      // Note that some Identity methods call phase->type(this).
      // Make sure that the type array is big enough for
      // our new node, even though we may throw the node away.
      // (Note: This tweaking with igvn only works because x is a new node.)
      _igvn.set_type(x, t);
      // If x is a TypeNode, capture any more-precise type permanently into Node,
      // otherwise it will not be updated during igvn->transform since
      // igvn->type(x) is set to x->Value() already.
      x->raise_bottom_type(t);
      Node* y = x->Identity(&_igvn);
      if (y != x) {
        wins++;
        x = y;
      } else {
        y = _igvn.hash_find(x);
        if (y == nullptr) {
          y = similar_subtype_check(x, region->in(i));
        }
        if (y) {
          wins++;
          x = y;
        } else {
          // Else x is a new node we are keeping
          // We do not need register_new_node_with_optimizer
          // because set_type has already been called.
          _igvn._worklist.push(x);
        }
      }
    }

    phi->set_req(i, x);

    if (the_clone == nullptr) {
      continue;
    }

    if (the_clone != x) {
      _igvn.remove_dead_node(the_clone);
    } else if (region->is_Loop() && i == LoopNode::LoopBackControl &&
               n->is_Load() && can_move_to_inner_loop(n, region->as_Loop(), x)) {
      // it is not a win if 'x' moved from an outer to an inner loop
      // this edge case can only happen for Load nodes
      wins = 0;
      break;
    }
  }
  // Too few wins?
  if (wins <= policy) {
    _igvn.remove_dead_node(phi);
    return nullptr;
  }

  // Record Phi
  register_new_node(phi, region);

  for (uint i2 = 1; i2 < phi->req(); i2++) {
    Node* x = phi->in(i2);
    // If we commoned up the cloned 'x' with another existing Node,
    // the existing Node picks up a new use. We need to make the
    // existing Node occur higher up so it dominates its uses.
    Node* old_ctrl;
    IdealLoopTree* old_loop;

    if (x->is_Con()) {
      assert(get_ctrl(x) == C->root(), "constant control is not root");
      continue;
    }
    // The occasional new node
    if (x->_idx >= old_unique) { // Found a new, unplaced node?
      old_ctrl = nullptr;
      old_loop = nullptr;       // Not in any prior loop
    } else {
      old_ctrl = get_ctrl(x);
      old_loop = get_loop(old_ctrl); // Get prior loop
    }
    // New late point must dominate new use
    Node* new_ctrl = dom_lca(old_ctrl, region->in(i2));
    if (new_ctrl == old_ctrl) // Nothing is changed
      continue;

    IdealLoopTree* new_loop = get_loop(new_ctrl);

    // Don't move x into a loop if its uses are
    // outside of loop. Otherwise x will be cloned
    // for each use outside of this loop.
    IdealLoopTree* use_loop = get_loop(region);
    if (!new_loop->is_member(use_loop) &&
        (old_loop == nullptr || !new_loop->is_member(old_loop))) {
      // Take early control, later control will be recalculated
      // during next iteration of loop optimizations.
      new_ctrl = get_early_ctrl(x);
      new_loop = get_loop(new_ctrl);
    }
    // Set new location
    set_ctrl(x, new_ctrl);
    // If changing loop bodies, see if we need to collect into new body
    if (old_loop != new_loop) {
      if (old_loop && !old_loop->_child)
        old_loop->_body.yank(x);
      if (!new_loop->_child)
        new_loop->_body.push(x); // Collect body info
    }
  }

  return phi;
}

// Test whether node 'x' can move into an inner loop relative to node 'n'.
// Note: The test is not exact. Returns true if 'x' COULD end up in an inner loop,
// BUT it can also return true when 'x' stays in the outer loop.
bool PhaseIdealLoop::can_move_to_inner_loop(Node* n, LoopNode* n_loop, Node* x) {
  IdealLoopTree* n_loop_tree = get_loop(n_loop);
  IdealLoopTree* x_loop_tree = get_loop(get_early_ctrl(x));
  // x_loop_tree should be outer or same loop as n_loop_tree
  return !x_loop_tree->is_member(n_loop_tree);
}

// Subtype checks that carry profile data don't common so look for a replacement by following edges
Node* PhaseIdealLoop::similar_subtype_check(const Node* x, Node* r_in) {
  if (x->is_SubTypeCheck()) {
    Node* in1 = x->in(1);
    for (DUIterator_Fast imax, i = in1->fast_outs(imax); i < imax; i++) {
      Node* u = in1->fast_out(i);
      if (u != x && u->is_SubTypeCheck() && u->in(1) == x->in(1) && u->in(2) == x->in(2)) {
        for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
          Node* bol = u->fast_out(j);
          for (DUIterator_Fast kmax, k = bol->fast_outs(kmax); k < kmax; k++) {
            Node* iff = bol->fast_out(k);
            // Only dominating subtype checks are interesting: otherwise we risk replacing a subtype check by another with
            // unrelated profile
            if (iff->is_If() && is_dominator(iff, r_in)) {
              return u;
            }
          }
        }
      }
    }
  }
  return nullptr;
}

// Return true if 'n' is a Div or Mod node (without zero check If node which was removed earlier) with a loop phi divisor
// of a trip-counted (integer or long) loop with a backedge input that could be zero (include zero in its type range). In
// this case, we cannot split the division to the backedge as it could freely float above the loop exit check resulting in
// a division by zero. This situation is possible because the type of an increment node of an iv phi (trip-counter) could
// include zero while the iv phi does not (see PhiNode::Value() for trip-counted loops where we improve types of iv phis).
// We also need to check other loop phis as they could have been created in the same split-if pass when applying
// PhaseIdealLoop::split_thru_phi() to split nodes through an iv phi.
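// Illustrative example (not from the original sources): in "for (int i = 10; i > 0; i--) { sum += 100 / i; }"
// the iv phi has type [1..10], but its backedge input (the decrement) has type [0..9], which includes zero.
// Splitting the division through the phi would let the clone on the backedge divide by that decrement,
// and that clone could float above the loop exit check and divide by zero.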
bool PhaseIdealLoop::cannot_split_division(const Node* n, const Node* region) const {
  const Type* zero;
  switch (n->Opcode()) {
    case Op_DivI:
    case Op_ModI:
    case Op_UDivI:
    case Op_UModI:
      zero = TypeInt::ZERO;
      break;
    case Op_DivL:
    case Op_ModL:
    case Op_UDivL:
    case Op_UModL:
      zero = TypeLong::ZERO;
      break;
    default:
      return false;
  }

  if (n->in(0) != nullptr) {
    // Cannot split through phi if Div or Mod node has a control dependency to a zero check.
    return true;
  }

  Node* divisor = n->in(2);
  return is_divisor_loop_phi(divisor, region) &&
         loop_phi_backedge_type_contains_zero(divisor, zero);
}

bool PhaseIdealLoop::is_divisor_loop_phi(const Node* divisor, const Node* loop) {
  return loop->is_Loop() && divisor->is_Phi() && divisor->in(0) == loop;
}

bool PhaseIdealLoop::loop_phi_backedge_type_contains_zero(const Node* phi_divisor, const Type* zero) const {
  return _igvn.type(phi_divisor->in(LoopNode::LoopBackControl))->filter_speculative(zero) != Type::TOP;
}

//------------------------------dominated_by------------------------------------
// Replace the dominated test with an obvious true or false. Place it on the
// IGVN worklist for later cleanup. Move control-dependent data Nodes on the
// live path up to the dominating control.
void PhaseIdealLoop::dominated_by(IfProjNode* prevdom, IfNode* iff, bool flip, bool pin_array_access_nodes) {
  if (VerifyLoopOptimizations && PrintOpto) { tty->print_cr("dominating test"); }

  // prevdom is the dominating projection of the dominating test.
  assert(iff->Opcode() == Op_If ||
         iff->Opcode() == Op_CountedLoopEnd ||
         iff->Opcode() == Op_LongCountedLoopEnd ||
         iff->Opcode() == Op_RangeCheck ||
         iff->Opcode() == Op_ParsePredicate,
         "Check this code when new subtype is added");

  int pop = prevdom->Opcode();
  assert(pop == Op_IfFalse || pop == Op_IfTrue, "");
  if (flip) {
    if (pop == Op_IfTrue)
      pop = Op_IfFalse;
    else
      pop = Op_IfTrue;
  }
  // 'con' is set to true or false to kill the dominated test.
  Node* con = makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
  // Hack the dominated test
  _igvn.replace_input_of(iff, 1, con);

  // If I don't have a reachable TRUE and FALSE path following the IfNode then
  // I can assume this path reaches an infinite loop. In this case it's not
  // important to optimize the data Nodes - either the whole compilation will
  // be tossed or this path (and all data Nodes) will go dead.
  if (iff->outcnt() != 2) {
    return;
  }

  // Make control-dependent data Nodes on the live path (path that will remain
  // once the dominated IF is removed) become control-dependent on the
  // dominating projection.
  Node* dp = iff->proj_out_or_null(pop == Op_IfTrue);

  if (dp == nullptr) {
    return;
  }

  rewire_safe_outputs_to_dominator(dp, prevdom, pin_array_access_nodes);
}

void PhaseIdealLoop::rewire_safe_outputs_to_dominator(Node* source, Node* dominator, const bool pin_array_access_nodes) {
  IdealLoopTree* old_loop = get_loop(source);

  for (DUIterator_Fast imax, i = source->fast_outs(imax); i < imax; i++) {
    Node* out = source->fast_out(i); // Control-dependent node
    // Do not rewire Div and Mod nodes which could have a zero divisor to avoid skipping their zero check.
    if (out->depends_only_on_test() && _igvn.no_dependent_zero_check(out)) {
      assert(out->in(0) == source, "must be control dependent on source");
      _igvn.replace_input_of(out, 0, dominator);
      if (pin_array_access_nodes) {
        // Because of Loop Predication, Loads and range check Cast nodes that are control dependent on this range
        // check (that is about to be removed) now depend on multiple dominating Hoisted Check Predicates. After the
        // removal of this range check, these control dependent nodes end up at the lowest/nearest dominating predicate
        // in the graph. To ensure that these Loads/Casts do not float above any of the dominating checks (even when the
        // lowest dominating check is later replaced by yet another dominating check), we need to pin them at the lowest
        // dominating check.
        Node* clone = out->pin_array_access_node();
        if (clone != nullptr) {
          clone = _igvn.register_new_node_with_optimizer(clone, out);
          _igvn.replace_node(out, clone);
          out = clone;
        }
      }
      set_early_ctrl(out, false);
      IdealLoopTree* new_loop = get_loop(get_ctrl(out));
      if (old_loop != new_loop) {
        if (!old_loop->_child) {
          old_loop->_body.yank(out);
        }
        if (!new_loop->_child) {
          new_loop->_body.push(out);
        }
      }
      --i;
      --imax;
    }
  }
}

//------------------------------has_local_phi_input----------------------------
// Return TRUE if 'n' has Phi inputs from its local block and no other
// block-local inputs (all non-local-phi inputs come from earlier blocks)
Node* PhaseIdealLoop::has_local_phi_input(Node* n) {
  Node* n_ctrl = get_ctrl(n);
  // See if some inputs come from a Phi in this block, or from before
  // this block.
  uint i;
  for (i = 1; i < n->req(); i++) {
    Node* phi = n->in(i);
    if (phi->is_Phi() && phi->in(0) == n_ctrl)
      break;
  }
  if (i >= n->req())
    return nullptr;             // No Phi inputs; nowhere to clone thru

  // Check for inputs created between 'n' and the Phi input. These
  // must split as well; they have already been given the chance
  // (courtesy of a post-order visit) and since they did not we must
  // recover the 'cost' of splitting them by being very profitable
  // when splitting 'n'. Since this is unlikely we simply give up.
  for (i = 1; i < n->req(); i++) {
    Node* m = n->in(i);
    if (get_ctrl(m) == n_ctrl && !m->is_Phi()) {
      // We allow the special case of AddP's with no local inputs.
      // This allows us to split-up address expressions.
      if (m->is_AddP() &&
          get_ctrl(m->in(AddPNode::Base)) != n_ctrl &&
          get_ctrl(m->in(AddPNode::Address)) != n_ctrl &&
          get_ctrl(m->in(AddPNode::Offset)) != n_ctrl) {
        // Move the AddP up to the dominating point. That's fine because control of m's inputs
        // must dominate get_ctrl(m) == n_ctrl and we just checked that the input controls are != n_ctrl.
        Node* c = find_non_split_ctrl(idom(n_ctrl));
        if (c->is_OuterStripMinedLoop()) {
          c->as_Loop()->verify_strip_mined(1);
          c = c->in(LoopNode::EntryControl);
        }
        set_ctrl_and_loop(m, c);
        continue;
      }
      return nullptr;
    }
    assert(n->is_Phi() || m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control");
  }

  return n_ctrl;
}

// Replace expressions like ((V+I) << 2) with (V<<2 + I<<2).
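// Illustrative note (not from the original sources): with loop-varying V and loop-invariant I, the reshaped
// form exposes the loop-invariant part (I << 2) so it can be placed outside the loop, leaving only
// (V << 2) plus the hoisted value inside the loop; this typically comes from array address arithmetic
// such as a[v + inv].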
Node* PhaseIdealLoop::remix_address_expressions_add_left_shift(Node* n, IdealLoopTree* n_loop, Node* n_ctrl, BasicType bt) {
  assert(bt == T_INT || bt == T_LONG, "only for integers");
  int n_op = n->Opcode();

  if (n_op == Op_LShift(bt)) {
    // Scale is loop invariant
    Node* scale = n->in(2);
    Node* scale_ctrl = get_ctrl(scale);
    IdealLoopTree* scale_loop = get_loop(scale_ctrl);
    if (n_loop == scale_loop || !scale_loop->is_member(n_loop)) {
      return nullptr;
    }
    const TypeInt* scale_t = scale->bottom_type()->isa_int();
    if (scale_t != nullptr && scale_t->is_con() && scale_t->get_con() >= 16) {
      return nullptr;           // Don't bother with byte/short masking
    }
    // Add must vary with loop (else shift would be loop-invariant)
    Node* add = n->in(1);
    Node* add_ctrl = get_ctrl(add);
    IdealLoopTree* add_loop = get_loop(add_ctrl);
    if (n_loop != add_loop) {
      return nullptr;           // happens w/ evil ZKM loops
    }

    // Convert I-V into I+ (0-V); same for V-I
    if (add->Opcode() == Op_Sub(bt) &&
        _igvn.type(add->in(1)) != TypeInteger::zero(bt)) {
      assert(add->Opcode() == Op_SubI || add->Opcode() == Op_SubL, "");
      Node* zero = integercon(0, bt);
      Node* neg = SubNode::make(zero, add->in(2), bt);
      register_new_node_with_ctrl_of(neg, add->in(2));
      add = AddNode::make(add->in(1), neg, bt);
      register_new_node(add, add_ctrl);
    }
    if (add->Opcode() != Op_Add(bt)) return nullptr;
    assert(add->Opcode() == Op_AddI || add->Opcode() == Op_AddL, "");
    // See if one add input is loop invariant
    Node* add_var = add->in(1);
    Node* add_var_ctrl = get_ctrl(add_var);
    IdealLoopTree* add_var_loop = get_loop(add_var_ctrl);
    Node* add_invar = add->in(2);
    Node* add_invar_ctrl = get_ctrl(add_invar);
    IdealLoopTree* add_invar_loop = get_loop(add_invar_ctrl);
    if (add_invar_loop == n_loop) {
      // Swap to find the invariant part
      add_invar = add_var;
      add_invar_ctrl = add_var_ctrl;
      add_invar_loop = add_var_loop;
      add_var = add->in(2);
    } else if (add_var_loop != n_loop) { // Else neither input is loop invariant
      return nullptr;
    }
    if (n_loop == add_invar_loop || !add_invar_loop->is_member(n_loop)) {
      return nullptr;           // No invariant part of the add?
    }

    // Yes! Reshape address expression!
    Node* inv_scale = LShiftNode::make(add_invar, scale, bt);
    Node* inv_scale_ctrl =
        dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
        add_invar_ctrl : scale_ctrl;
    register_new_node(inv_scale, inv_scale_ctrl);
    Node* var_scale = LShiftNode::make(add_var, scale, bt);
    register_new_node(var_scale, n_ctrl);
    Node* var_add = AddNode::make(var_scale, inv_scale, bt);
    register_new_node(var_add, n_ctrl);
    _igvn.replace_node(n, var_add);
    return var_add;
  }
  return nullptr;
}

//------------------------------remix_address_expressions----------------------
// Rework addressing expressions to get the most loop-invariant stuff
// moved out. We'd like to do all associative operators, but it's especially
// important (common) to do address expressions.
Node* PhaseIdealLoop::remix_address_expressions(Node* n) {
  if (!has_ctrl(n)) return nullptr;
  Node* n_ctrl = get_ctrl(n);
  IdealLoopTree* n_loop = get_loop(n_ctrl);

  // See if 'n' mixes loop-varying and loop-invariant inputs and
  // itself is loop-varying.

  // Only interested in binary ops (and AddP)
  if (n->req() < 3 || n->req() > 4) return nullptr;

  Node* n1_ctrl = get_ctrl(n->in(1));
  Node* n2_ctrl = get_ctrl(n->in(2));
  Node* n3_ctrl = get_ctrl(n->in(n->req() == 3 ? 2 : 3));
  IdealLoopTree* n1_loop = get_loop(n1_ctrl);
  IdealLoopTree* n2_loop = get_loop(n2_ctrl);
  IdealLoopTree* n3_loop = get_loop(n3_ctrl);

  // Does one of my inputs spin in a tighter loop than self?
  if ((n_loop->is_member(n1_loop) && n_loop != n1_loop) ||
      (n_loop->is_member(n2_loop) && n_loop != n2_loop) ||
      (n_loop->is_member(n3_loop) && n_loop != n3_loop)) {
    return nullptr;             // Leave well enough alone
  }

  // Is at least one of my inputs loop-invariant?
  if (n1_loop == n_loop &&
      n2_loop == n_loop &&
      n3_loop == n_loop) {
    return nullptr;             // No loop-invariant inputs
  }

  Node* res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_INT);
  if (res != nullptr) {
    return res;
  }
  res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_LONG);
  if (res != nullptr) {
    return res;
  }

  int n_op = n->Opcode();
  // Replace (I+V) with (V+I)
  if (n_op == Op_AddI ||
      n_op == Op_AddL ||
      n_op == Op_AddF ||
      n_op == Op_AddD ||
      n_op == Op_MulI ||
      n_op == Op_MulL ||
      n_op == Op_MulF ||
      n_op == Op_MulD) {
    if (n2_loop == n_loop) {
      assert(n1_loop != n_loop, "");
      n->swap_edges(1, 2);
    }
  }

  // Replace ((I1 +p V) +p I2) with ((I1 +p I2) +p V),
  // but not if I2 is a constant. Skip for irreducible loops.
  if (n_op == Op_AddP && n_loop->_head->is_Loop()) {
    if (n2_loop == n_loop && n3_loop != n_loop) {
      if (n->in(2)->Opcode() == Op_AddP && !n->in(3)->is_Con()) {
        Node* n22_ctrl = get_ctrl(n->in(2)->in(2));
        Node* n23_ctrl = get_ctrl(n->in(2)->in(3));
        IdealLoopTree* n22loop = get_loop(n22_ctrl);
        IdealLoopTree* n23_loop = get_loop(n23_ctrl);
        if (n22loop != n_loop && n22loop->is_member(n_loop) &&
            n23_loop == n_loop) {
          Node* add1 = new AddPNode(n->in(1), n->in(2)->in(2), n->in(3));
          // Stuff new AddP in the loop preheader
          register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl));
          Node* add2 = new AddPNode(n->in(1), add1, n->in(2)->in(3));
          register_new_node(add2, n_ctrl);
          _igvn.replace_node(n, add2);
          return add2;
        }
      }
    }

    // Replace (I1 +p (I2 + V)) with ((I1 +p I2) +p V)
    if (n2_loop != n_loop && n3_loop == n_loop) {
      if (n->in(3)->Opcode() == Op_AddX) {
        Node* V = n->in(3)->in(1);
        Node* I = n->in(3)->in(2);
        if (is_member(n_loop, get_ctrl(V))) {
        } else {
          Node* tmp = V; V = I; I = tmp;
        }
        if (!is_member(n_loop, get_ctrl(I))) {
          Node* add1 = new AddPNode(n->in(1), n->in(2), I);
          // Stuff new AddP in the loop preheader
          register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl));
          Node* add2 = new AddPNode(n->in(1), add1, V);
          register_new_node(add2, n_ctrl);
          _igvn.replace_node(n, add2);
          return add2;
        }
      }
    }
  }

  return nullptr;
}

// Optimize ((in1[2*i] * in2[2*i]) + (in1[2*i+1] * in2[2*i+1]))
Node* PhaseIdealLoop::convert_add_to_muladd(Node* n) {
  assert(n->Opcode() == Op_AddI, "sanity");
  Node* nn = nullptr;
  Node* in1 = n->in(1);
  Node* in2 = n->in(2);
  if (in1->Opcode() == Op_MulI && in2->Opcode() == Op_MulI) {
    IdealLoopTree* loop_n = get_loop(get_ctrl(n));
    if (loop_n->is_counted() &&
        loop_n->_head->as_Loop()->is_valid_counted_loop(T_INT) &&
        Matcher::match_rule_supported(Op_MulAddVS2VI) &&
        Matcher::match_rule_supported(Op_MulAddS2I)) {
      Node* mul_in1 = in1->in(1);
      Node* mul_in2 = in1->in(2);
      Node* mul_in3 = in2->in(1);
      Node* mul_in4 = in2->in(2);
      if (mul_in1->Opcode() == Op_LoadS &&
          mul_in2->Opcode() == Op_LoadS &&
          mul_in3->Opcode() == Op_LoadS &&
          mul_in4->Opcode() == Op_LoadS) {
        IdealLoopTree* loop1 = get_loop(get_ctrl(mul_in1));
        IdealLoopTree* loop2 = get_loop(get_ctrl(mul_in2));
        IdealLoopTree* loop3 = get_loop(get_ctrl(mul_in3));
        IdealLoopTree* loop4 = get_loop(get_ctrl(mul_in4));
        IdealLoopTree* loop5 = get_loop(get_ctrl(in1));
        IdealLoopTree* loop6 = get_loop(get_ctrl(in2));
        // All nodes should be in the same counted loop.
        if (loop_n == loop1 && loop_n == loop2 && loop_n == loop3 &&
            loop_n == loop4 && loop_n == loop5 && loop_n == loop6) {
          Node* adr1 = mul_in1->in(MemNode::Address);
          Node* adr2 = mul_in2->in(MemNode::Address);
          Node* adr3 = mul_in3->in(MemNode::Address);
          Node* adr4 = mul_in4->in(MemNode::Address);
          if (adr1->is_AddP() && adr2->is_AddP() && adr3->is_AddP() && adr4->is_AddP()) {
            if ((adr1->in(AddPNode::Base) == adr3->in(AddPNode::Base)) &&
                (adr2->in(AddPNode::Base) == adr4->in(AddPNode::Base))) {
              nn = new MulAddS2INode(mul_in1, mul_in2, mul_in3, mul_in4);
              register_new_node_with_ctrl_of(nn, n);
              _igvn.replace_node(n, nn);
              return nn;
            } else if ((adr1->in(AddPNode::Base) == adr4->in(AddPNode::Base)) &&
                       (adr2->in(AddPNode::Base) == adr3->in(AddPNode::Base))) {
              nn = new MulAddS2INode(mul_in1, mul_in2, mul_in4, mul_in3);
              register_new_node_with_ctrl_of(nn, n);
              _igvn.replace_node(n, nn);
              return nn;
            }
          }
        }
      }
    }
  }
  return nn;
}

//------------------------------conditional_move-------------------------------
// Attempt to replace a Phi with a conditional move. We have some pretty
// strict profitability requirements. All Phis at the merge point must
// be converted, so we can remove the control flow. We need to limit the
// number of c-moves to a small handful. All code that was in the side-arms
// of the CFG diamond is now speculatively executed. This code has to be
// "cheap enough". We are pretty much limited to CFG diamonds that merge
// 1 or 2 items with a total of 1 or 2 ops executed speculatively.
Node* PhaseIdealLoop::conditional_move(Node* region) {

  assert(region->is_Region(), "sanity check");
  if (region->req() != 3) return nullptr;

  // Check for CFG diamond
  Node* lp = region->in(1);
  Node* rp = region->in(2);
  if (!lp || !rp) return nullptr;
  Node* lp_c = lp->in(0);
  if (lp_c == nullptr || lp_c != rp->in(0) || !lp_c->is_If()) return nullptr;
  IfNode* iff = lp_c->as_If();

  // Check for ops pinned in an arm of the diamond.
  // Can't remove the control flow in this case
  if (lp->outcnt() > 1) return nullptr;
  if (rp->outcnt() > 1) return nullptr;

  IdealLoopTree* r_loop = get_loop(region);
  assert(r_loop == get_loop(iff), "sanity");
  // Always convert to CMOVE if all results are used only outside this loop.
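  // If the region is not inside any real loop (r_loop == _ltree_root), start out treating the results as
  // used "inside the loop" so the cost checks below still apply; otherwise start at false and flip to true
  // once a use inside the loop is found.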
  bool used_inside_loop = (r_loop == _ltree_root);

  // Check profitability
  int cost = 0;
  int phis = 0;
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    Node* out = region->fast_out(i);
    if (!out->is_Phi()) continue; // Ignore other control edges, etc
    phis++;
    PhiNode* phi = out->as_Phi();
    BasicType bt = phi->type()->basic_type();
    switch (bt) {
      case T_DOUBLE:
      case T_FLOAT:
        if (C->use_cmove()) {
          continue; // TODO: maybe we want to add some cost
        }
        cost += Matcher::float_cmove_cost(); // Could be very expensive
        break;
      case T_LONG: {
        cost += Matcher::long_cmove_cost(); // May encode as 2 CMOVs
      }
      case T_INT:                 // These all CMOV fine
      case T_ADDRESS: {           // (RawPtr)
        cost++;
        break;
      }
      case T_NARROWOOP: // Fall through
      case T_OBJECT: {            // Base oops are OK, but not derived oops
        const TypeOopPtr* tp = phi->type()->make_ptr()->isa_oopptr();
        // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
        // CMOVE'd derived pointer? It's a CMOVE'd derived base. Thus
        // CMOVE'ing a derived pointer requires we also CMOVE the base. If we
        // have a Phi for the base here that we convert to a CMOVE all is well
        // and good. But if the base is dead, we'll not make a CMOVE. Later
        // the allocator will have to produce a base by creating a CMOVE of the
        // relevant bases. This puts the allocator in the business of
        // manufacturing expensive instructions, generally a bad plan.
        // Just Say No to Conditionally-Moved Derived Pointers.
        if (tp && tp->offset() != 0)
          return nullptr;
        cost++;
        break;
      }
      default:
        return nullptr;           // In particular, can't do memory or I/O
    }
    // Add in cost any speculative ops
    for (uint j = 1; j < region->req(); j++) {
      Node* proj = region->in(j);
      Node* inp = phi->in(j);
      if (get_ctrl(inp) == proj) { // Found local op
        cost++;
        // Check for a chain of dependent ops; these will all become
        // speculative in a CMOV.
        for (uint k = 1; k < inp->req(); k++)
          if (get_ctrl(inp->in(k)) == proj)
            cost += ConditionalMoveLimit; // Too much speculative goo
      }
    }
    // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
    // This will likely Split-If, a higher-payoff operation.
    for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
      Node* use = phi->fast_out(k);
      if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
        cost += ConditionalMoveLimit;
      // Is there a use inside the loop?
      // Note: check only basic types since CMoveP is pinned.
      if (!used_inside_loop && is_java_primitive(bt)) {
        IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
        if (r_loop == u_loop || r_loop->is_member(u_loop)) {
          used_inside_loop = true;
        }
      }
    }
  } // for
  Node* bol = iff->in(1);
  assert(!bol->is_OpaqueInitializedAssertionPredicate(), "Initialized Assertion Predicates cannot form a diamond with Halt");
  if (bol->is_OpaqueTemplateAssertionPredicate()) {
    // Ignore Template Assertion Predicates with OpaqueTemplateAssertionPredicate nodes.
    return nullptr;
  }
  assert(bol->Opcode() == Op_Bool, "Unexpected node");
  int cmp_op = bol->in(1)->Opcode();
  if (cmp_op == Op_SubTypeCheck) { // SubTypeCheck expansion expects an IfNode
    return nullptr;
  }
  // It is expensive to generate flags from a float compare.
  // Avoid duplicated float compare.
  if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return nullptr;

  float infrequent_prob = PROB_UNLIKELY_MAG(3);
  // Ignore cost and block frequency if CMOVE can be moved outside the loop.
  if (used_inside_loop) {
    if (cost >= ConditionalMoveLimit) return nullptr; // Too much goo

    // BlockLayoutByFrequency optimization moves infrequent branch
    // from hot path. No point in CMOV'ing in such case (110 is used
    // instead of 100 to take into account the inexactness of float values).
    if (BlockLayoutByFrequency) {
      infrequent_prob = MAX2(infrequent_prob, (float)BlockLayoutMinDiamondPercentage/110.0f);
    }
  }
  // Check for highly predictable branch. No point in CMOV'ing if
  // we are going to predict accurately all the time.
  if (C->use_cmove() && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) {
    // keep going
  } else if (iff->_prob < infrequent_prob ||
             iff->_prob > (1.0f - infrequent_prob))
    return nullptr;

  // --------------
  // Now replace all Phis with CMOV's
  Node* cmov_ctrl = iff->in(0);
  uint flip = (lp->Opcode() == Op_IfTrue);
  Node_List wq;
  while (1) {
    PhiNode* phi = nullptr;
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      Node* out = region->fast_out(i);
      if (out->is_Phi()) {
        phi = out->as_Phi();
        break;
      }
    }
    if (phi == nullptr || _igvn.type(phi) == Type::TOP) {
      break;
    }
    if (PrintOpto && VerifyLoopOptimizations) { tty->print_cr("CMOV"); }
    // Move speculative ops
    wq.push(phi);
    while (wq.size() > 0) {
      Node* n = wq.pop();
      for (uint j = 1; j < n->req(); j++) {
        Node* m = n->in(j);
        if (m != nullptr && !is_dominator(get_ctrl(m), cmov_ctrl)) {
#ifndef PRODUCT
          if (PrintOpto && VerifyLoopOptimizations) {
            tty->print(" speculate: ");
            m->dump();
          }
#endif
          set_ctrl(m, cmov_ctrl);
          wq.push(m);
        }
      }
    }
    Node* cmov = CMoveNode::make(iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi));
    register_new_node(cmov, cmov_ctrl);
    _igvn.replace_node(phi, cmov);
#ifndef PRODUCT
    if (TraceLoopOpts) {
      tty->print("CMOV ");
      r_loop->dump_head();
      if (Verbose) {
        bol->in(1)->dump(1);
        cmov->dump(1);
      }
    }
    DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
#endif
  }

  // The useless CFG diamond will fold up later; see the optimization in
  // RegionNode::Ideal.
  _igvn._worklist.push(region);

  return iff->in(1);
}

static void enqueue_cfg_uses(Node* m, Unique_Node_List& wq) {
  for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
    Node* u = m->fast_out(i);
    if (u->is_CFG()) {
      if (u->is_NeverBranch()) {
        u = u->as_NeverBranch()->proj_out(0);
        enqueue_cfg_uses(u, wq);
      } else {
        wq.push(u);
      }
    }
  }
}

// Try moving a store out of a loop, right before the loop
Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node* n_ctrl) {
  // Store has to be first in the loop body
  IdealLoopTree* n_loop = get_loop(n_ctrl);
  if (n->is_Store() && n_loop != _ltree_root &&
      n_loop->is_loop() && n_loop->_head->is_Loop() &&
      n->in(0) != nullptr) {
    Node* address = n->in(MemNode::Address);
    Node* value = n->in(MemNode::ValueIn);
    Node* mem = n->in(MemNode::Memory);
    IdealLoopTree* address_loop = get_loop(get_ctrl(address));
    IdealLoopTree* value_loop = get_loop(get_ctrl(value));

    // - address and value must be loop invariant
    // - memory must be a memory Phi for the loop
    // - Store must be the only store on this memory slice in the
    //   loop: if there's another store following this one then value
    //   written at iteration i by the second store could be overwritten
    //   at iteration i+n by the first store: it's not safe to move the
    //   first store out of the loop
    // - nothing must observe the memory Phi: it guarantees no read
    //   before the store, we are also guaranteed the store post
    //   dominates the loop head (ignoring a possible early
    //   exit). Otherwise there would be extra Phi involved between the
    //   loop's Phi and the store.
    // - there must be no early exit from the loop before the Store
    //   (such an exit most of the time would be an extra use of the
    //   memory Phi but sometimes is a bottom memory Phi that takes the
    //   store as input).

    if (!n_loop->is_member(address_loop) &&
        !n_loop->is_member(value_loop) &&
        mem->is_Phi() && mem->in(0) == n_loop->_head &&
        mem->outcnt() == 1 &&
        mem->in(LoopNode::LoopBackControl) == n) {

      assert(n_loop->_tail != nullptr, "need a tail");
      assert(is_dominator(n_ctrl, n_loop->_tail), "store control must not be in a branch in the loop");

      // Verify that there's no early exit of the loop before the store.
      bool ctrl_ok = false;
      {
        // Follow control from loop head until n, we exit the loop or
        // we reach the tail
        ResourceMark rm;
        Unique_Node_List wq;
        wq.push(n_loop->_head);

        for (uint next = 0; next < wq.size(); ++next) {
          Node* m = wq.at(next);
          if (m == n->in(0)) {
            ctrl_ok = true;
            continue;
          }
          assert(!has_ctrl(m), "should be CFG");
          if (!n_loop->is_member(get_loop(m)) || m == n_loop->_tail) {
            ctrl_ok = false;
            break;
          }
          enqueue_cfg_uses(m, wq);
          if (wq.size() > 10) {
            ctrl_ok = false;
            break;
          }
        }
      }
      if (ctrl_ok) {
        // move the Store
        _igvn.replace_input_of(mem, LoopNode::LoopBackControl, mem);
        _igvn.replace_input_of(n, 0, n_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl));
        _igvn.replace_input_of(n, MemNode::Memory, mem->in(LoopNode::EntryControl));
        // Disconnect the phi now. An empty phi can confuse other
        // optimizations in this pass of loop opts.
        _igvn.replace_node(mem, mem->in(LoopNode::EntryControl));
        n_loop->_body.yank(mem);

        set_ctrl_and_loop(n, n->in(0));

        return n;
      }
    }
  }
  return nullptr;
}

// Try moving a store out of a loop, right after the loop
void PhaseIdealLoop::try_move_store_after_loop(Node* n) {
  if (n->is_Store() && n->in(0) != nullptr) {
    Node* n_ctrl = get_ctrl(n);
    IdealLoopTree* n_loop = get_loop(n_ctrl);
    // Store must be in a loop
    if (n_loop != _ltree_root && !n_loop->_irreducible) {
      Node* address = n->in(MemNode::Address);
      Node* value = n->in(MemNode::ValueIn);
      IdealLoopTree* address_loop = get_loop(get_ctrl(address));
      // address must be loop invariant
      if (!n_loop->is_member(address_loop)) {
        // Store must be last on this memory slice in the loop and
        // nothing in the loop must observe it
        Node* phi = nullptr;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node* u = n->fast_out(i);
          if (has_ctrl(u)) { // control use?
            IdealLoopTree* u_loop = get_loop(get_ctrl(u));
            if (!n_loop->is_member(u_loop)) {
              continue;
            }
            if (u->is_Phi() && u->in(0) == n_loop->_head) {
              assert(_igvn.type(u) == Type::MEMORY, "bad phi");
              // multiple phis on the same slice are possible
              if (phi != nullptr) {
                return;
              }
              phi = u;
              continue;
            }
          }
          return;
        }
        if (phi != nullptr) {
          // Nothing in the loop before the store (next iteration)
          // must observe the stored value
          bool mem_ok = true;
          {
            ResourceMark rm;
            Unique_Node_List wq;
            wq.push(phi);
            for (uint next = 0; next < wq.size() && mem_ok; ++next) {
              Node* m = wq.at(next);
              for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax && mem_ok; i++) {
                Node* u = m->fast_out(i);
                if (u->is_Store() || u->is_Phi()) {
                  if (u != n) {
                    wq.push(u);
                    mem_ok = (wq.size() <= 10);
                  }
                } else {
                  mem_ok = false;
                  break;
                }
              }
            }
          }
          if (mem_ok) {
            // Move the store out of the loop if the LCA of all
            // users (except for the phi) is outside the loop.
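            // Temporarily reroute the phi's use of 'n' to a throwaway 'hook' node so that get_late_ctrl()
            // below only sees the remaining users; the hook is later replaced by 'n' again (bail out) or by
            // the store's memory input (store moved).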
            Node* hook = new Node(1);
            hook->init_req(0, n_ctrl); // Add an input to prevent hook from being dead
            _igvn.rehash_node_delayed(phi);
            int count = phi->replace_edge(n, hook, &_igvn);
            assert(count > 0, "inconsistent phi");

            // Compute latest point this store can go
            Node* lca = get_late_ctrl(n, get_ctrl(n));
            if (lca->is_OuterStripMinedLoop()) {
              lca = lca->in(LoopNode::EntryControl);
            }
            if (n_loop->is_member(get_loop(lca))) {
              // LCA is in the loop - bail out
              _igvn.replace_node(hook, n);
              return;
            }
#ifdef ASSERT
            if (n_loop->_head->is_Loop() && n_loop->_head->as_Loop()->is_strip_mined()) {
              assert(n_loop->_head->Opcode() == Op_CountedLoop, "outer loop is a strip mined");
              n_loop->_head->as_Loop()->verify_strip_mined(1);
              Node* outer = n_loop->_head->as_CountedLoop()->outer_loop();
              IdealLoopTree* outer_loop = get_loop(outer);
              assert(n_loop->_parent == outer_loop, "broken loop tree");
              assert(get_loop(lca) == outer_loop, "safepoint in outer loop consumes all memory state");
            }
#endif
            lca = place_outside_loop(lca, n_loop);
            assert(!n_loop->is_member(get_loop(lca)), "control must not be back in the loop");
            assert(get_loop(lca)->_nest < n_loop->_nest || get_loop(lca)->_head->as_Loop()->is_in_infinite_subgraph(), "must not be moved into inner loop");

            // Move store out of the loop
            _igvn.replace_node(hook, n->in(MemNode::Memory));
            _igvn.replace_input_of(n, 0, lca);
            set_ctrl_and_loop(n, lca);

            // Disconnect the phi now. An empty phi can confuse other
            // optimizations in this pass of loop opts.
            if (phi->in(LoopNode::LoopBackControl) == phi) {
              _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
              n_loop->_body.yank(phi);
            }
          }
        }
      }
    }
  }
}

// Splitting some nodes that take a counted loop phi as input at a counted
// loop can cause vectorization of some expressions to fail
bool PhaseIdealLoop::split_thru_phi_could_prevent_vectorization(Node* n, Node* n_blk) {
  if (!n_blk->is_CountedLoop()) {
    return false;
  }

  int opcode = n->Opcode();

  if (opcode != Op_AndI &&
      opcode != Op_MulI &&
      opcode != Op_RotateRight &&
      opcode != Op_RShiftI) {
    return false;
  }

  return n->in(1) == n_blk->as_BaseCountedLoop()->phi();
}

//------------------------------split_if_with_blocks_pre-----------------------
// Do the real work in a non-recursive function. Data nodes want to be
// cloned in the pre-order so they can feed each other nicely.
Node* PhaseIdealLoop::split_if_with_blocks_pre(Node* n) {
  // Cloning these guys is unlikely to win
  int n_op = n->Opcode();
  if (n_op == Op_MergeMem) {
    return n;
  }
  if (n->is_Proj()) {
    return n;
  }
  // Do not clone-up CmpFXXX variations, as these are always
  // followed by a CmpI
  if (n->is_Cmp()) {
    return n;
  }
  // Attempt to use a conditional move instead of a phi/branch
  if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
    Node* cmov = conditional_move(n);
    if (cmov) {
      return cmov;
    }
  }
  if (n->is_CFG() || n->is_LoadStore()) {
    return n;
  }
  if (n->is_Opaque1()) {        // Opaque nodes cannot be mod'd
    if (!C->major_progress()) { // If chance of no more loop opts...
      _igvn._worklist.push(n);  // maybe we'll remove them
    }
    return n;
  }

  if (n->is_Con()) {
    return n;                   // No cloning for Con nodes
  }

  Node* n_ctrl = get_ctrl(n);
  if (!n_ctrl) {
    return n;                   // Dead node
  }

  Node* res = try_move_store_before_loop(n, n_ctrl);
  if (res != nullptr) {
    return n;
  }

  // Attempt to remix address expressions for loop invariants
  Node* m = remix_address_expressions(n);
  if (m) return m;

  if (n_op == Op_AddI) {
    Node* nn = convert_add_to_muladd(n);
    if (nn) return nn;
  }

  if (n->is_ConstraintCast()) {
    Node* dom_cast = n->as_ConstraintCast()->dominating_cast(&_igvn, this);
    // ConstraintCastNode::dominating_cast() uses node control input to determine domination.
    // Node control inputs don't necessarily agree with loop control info (due to
    // transformations happened in between), thus additional dominance check is needed
    // to keep loop info valid.
    if (dom_cast != nullptr && is_dominator(get_ctrl(dom_cast), get_ctrl(n))) {
      _igvn.replace_node(n, dom_cast);
      return dom_cast;
    }
  }

  // Determine if the Node has inputs from some local Phi.
  // Returns the block to clone thru.
  Node* n_blk = has_local_phi_input(n);
  if (!n_blk) return n;

  // Do not clone the trip counter through on a CountedLoop
  // (messes up the canonical shape).
  if (((n_blk->is_CountedLoop() || (n_blk->is_Loop() && n_blk->as_Loop()->is_loop_nest_inner_loop())) && n->Opcode() == Op_AddI) ||
      (n_blk->is_LongCountedLoop() && n->Opcode() == Op_AddL)) {
    return n;
  }
  // Pushing a shift through the iv Phi can get in the way of addressing optimizations or range check elimination
  if (n_blk->is_BaseCountedLoop() && n->Opcode() == Op_LShift(n_blk->as_BaseCountedLoop()->bt()) &&
      n->in(1) == n_blk->as_BaseCountedLoop()->phi()) {
    return n;
  }

  if (split_thru_phi_could_prevent_vectorization(n, n_blk)) {
    return n;
  }

  // Check for having no control input; not pinned. Allow
  // dominating control.
  if (n->in(0)) {
    Node* dom = idom(n_blk);
    if (dom_lca(n->in(0), dom) != n->in(0)) {
      return n;
    }
  }
  // Policy: when is it profitable. You must get more wins than
  // policy before it is considered profitable. Policy is usually 0,
  // so 1 win is considered profitable. Big merges will require big
  // cloning, so get a larger policy.
  int policy = n_blk->req() >> 2;

  // If the loop is a candidate for range check elimination,
  // delay splitting through its phi until a later loop optimization
  if (n_blk->is_BaseCountedLoop()) {
    IdealLoopTree* lp = get_loop(n_blk);
    if (lp && lp->_rce_candidate) {
      return n;
    }
  }

  if (must_throttle_split_if()) return n;

  // Split 'n' through the merge point if it is profitable
  Node* phi = split_thru_phi(n, n_blk, policy);
  if (!phi) return n;

  // Found a Phi to split thru!
  // Replace 'n' with the new phi
  _igvn.replace_node(n, phi);
  // Moved a load around the loop, 'en-registering' something.
  if (n_blk->is_Loop() && n->is_Load() &&
      !phi->in(LoopNode::LoopBackControl)->is_Load())
    C->set_major_progress();

  return phi;
}

static bool merge_point_too_heavy(Compile* C, Node* region) {
  // Bail out if the region and its phis have too many users.
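  // The total number of uses of the region's outputs is used as a proxy for how much the graph would
  // grow; bail out when that is large relative to the remaining node budget (weight * 8 > nodes_left below).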
  int weight = 0;
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    weight += region->fast_out(i)->outcnt();
  }
  int nodes_left = C->max_node_limit() - C->live_nodes();
  if (weight * 8 > nodes_left) {
    if (PrintOpto) {
      tty->print_cr("*** Split-if bails out: %d nodes, region weight %d", C->unique(), weight);
    }
    return true;
  } else {
    return false;
  }
}

static bool merge_point_safe(Node* region) {
  // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
  // having a PhiNode input. This sidesteps the dangerous case where the split
  // ConvI2LNode may become TOP if the input Value() does not
  // overlap the ConvI2L range, leaving a node which may not dominate its
  // uses.
  // A better fix for this problem can be found in the BugTraq entry, but
  // expediency for Mantis demands this hack.
#ifdef _LP64
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    Node* n = region->fast_out(i);
    if (n->is_Phi()) {
      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
        Node* m = n->fast_out(j);
        if (m->Opcode() == Op_ConvI2L)
          return false;
        if (m->is_CastII()) {
          return false;
        }
      }
    }
  }
#endif
  return true;
}


//------------------------------place_outside_loop---------------------------------
// Place some computation outside of this loop on the path to the use passed as argument
Node* PhaseIdealLoop::place_outside_loop(Node* useblock, IdealLoopTree* loop) const {
  Node* head = loop->_head;
  assert(!loop->is_member(get_loop(useblock)), "must be outside loop");
  if (head->is_Loop() && head->as_Loop()->is_strip_mined()) {
    loop = loop->_parent;
    assert(loop->_head->is_OuterStripMinedLoop(), "malformed strip mined loop");
  }

  // Pick control right outside the loop
  for (;;) {
    Node* dom = idom(useblock);
    if (loop->is_member(get_loop(dom))) {
      break;
    }
    useblock = dom;
  }
  assert(find_non_split_ctrl(useblock) == useblock, "should be non split control");
  return useblock;
}


bool PhaseIdealLoop::identical_backtoback_ifs(Node* n) {
  if (!n->is_If() || n->is_BaseCountedLoopEnd()) {
    return false;
  }
  if (!n->in(0)->is_Region()) {
    return false;
  }

  Node* region = n->in(0);
  Node* dom = idom(region);
  if (!dom->is_If() || !n->as_If()->same_condition(dom, &_igvn)) {
    return false;
  }
  IfNode* dom_if = dom->as_If();
  Node* proj_true = dom_if->proj_out(1);
  Node* proj_false = dom_if->proj_out(0);

  for (uint i = 1; i < region->req(); i++) {
    if (is_dominator(proj_true, region->in(i))) {
      continue;
    }
    if (is_dominator(proj_false, region->in(i))) {
      continue;
    }
    return false;
  }

  return true;
}


bool PhaseIdealLoop::can_split_if(Node* n_ctrl) {
  if (must_throttle_split_if()) {
    return false;
  }

  // Do not do 'split-if' if irreducible loops are present.
  if (_has_irreducible_loops) {
    return false;
  }

  if (merge_point_too_heavy(C, n_ctrl)) {
    return false;
  }

  // Do not do 'split-if' if some paths are dead. First do dead code
  // elimination and then see if it's still profitable.
  for (uint i = 1; i < n_ctrl->req(); i++) {
    if (n_ctrl->in(i) == C->top()) {
      return false;
    }
  }

  // If trying to do a 'Split-If' at the loop head, it is only
  // profitable if the cmp folds up on BOTH paths. Otherwise we
  // risk peeling a loop forever.

  // CNC - Disabled for now. Requires careful handling of loop
  // body selection for the cloned code. Also, make sure we check
  // for any input path not being in the same loop as n_ctrl. For
  // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
  // because the alternative loop entry points won't be converted
  // into LoopNodes.
  IdealLoopTree* n_loop = get_loop(n_ctrl);
  for (uint j = 1; j < n_ctrl->req(); j++) {
    if (get_loop(n_ctrl->in(j)) != n_loop) {
      return false;
    }
  }

  // Check for safety of the merge point.
  if (!merge_point_safe(n_ctrl)) {
    return false;
  }

  return true;
}

// Detect if the node is the inner strip-mined loop
// Return: null if it's not the case, or the exit of outer strip-mined loop
static Node* is_inner_of_stripmined_loop(const Node* out) {
  Node* out_le = nullptr;

  if (out->is_CountedLoopEnd()) {
    const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();

    if (loop != nullptr && loop->is_strip_mined()) {
      out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
    }
  }

  return out_le;
}

//------------------------------split_if_with_blocks_post----------------------
// Do the real work in a non-recursive function. CFG hackery wants to be
// in the post-order, so it can dirty the I-DOM info and not use the dirtied
// info.
void PhaseIdealLoop::split_if_with_blocks_post(Node* n) {

  // Cloning Cmp through Phi's involves the split-if transform.
  // FastLock is not used by an If
  if (n->is_Cmp() && !n->is_FastLock()) {
    Node* n_ctrl = get_ctrl(n);
    // Determine if the Node has inputs from some local Phi.
    // Returns the block to clone thru.
    Node* n_blk = has_local_phi_input(n);
    if (n_blk != n_ctrl) {
      return;
    }

    if (!can_split_if(n_ctrl)) {
      return;
    }

    if (n->outcnt() != 1) {
      return; // Multiple bool's from 1 compare?
    }
    Node* bol = n->unique_out();
    assert(bol->is_Bool(), "expect a bool here");
    if (bol->outcnt() != 1) {
      return; // Multiple branches from 1 compare?
    }
    Node* iff = bol->unique_out();

    // Check some safety conditions
    if (iff->is_If()) {           // Classic split-if?
      if (iff->in(0) != n_ctrl) {
        return;                   // Compare must be in same blk as if
      }
    } else if (iff->is_CMove()) { // Trying to split-up a CMOVE
      // Can't split CMove with different control.
      if (get_ctrl(iff) != n_ctrl) {
        return;
      }
      if (get_ctrl(iff->in(2)) == n_ctrl ||
          get_ctrl(iff->in(3)) == n_ctrl) {
        return;                   // Inputs not yet split-up
      }
      if (get_loop(n_ctrl) != get_loop(get_ctrl(iff))) {
        return;                   // Loop-invar test gates loop-varying CMOVE
      }
    } else {
      return;                     // some other kind of node, such as an Allocate
    }

    // When is split-if profitable? Every 'win' means some control flow
    // goes dead, so it's almost always a win.
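    // A policy of 0 means a single 'win' reported by split_thru_phi() is enough for the split to proceed.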
    int policy = 0;
    // Split compare 'n' through the merge point if it is profitable
    Node* phi = split_thru_phi(n, n_ctrl, policy);
    if (!phi) {
      return;
    }

    // Found a Phi to split thru!
    // Replace 'n' with the new phi
    _igvn.replace_node(n, phi);

    // Now split the bool up thru the phi
    Node* bolphi = split_thru_phi(bol, n_ctrl, -1);
    guarantee(bolphi != nullptr, "null boolean phi node");

    _igvn.replace_node(bol, bolphi);
    assert(iff->in(1) == bolphi, "");

    if (bolphi->Value(&_igvn)->singleton()) {
      return;
    }

    // Conditional-move? Must split up now
    if (!iff->is_If()) {
      Node* cmovphi = split_thru_phi(iff, n_ctrl, -1);
      _igvn.replace_node(iff, cmovphi);
      return;
    }

    // Now split the IF
    C->print_method(PHASE_BEFORE_SPLIT_IF, 4, iff);
    if ((PrintOpto && VerifyLoopOptimizations) || TraceLoopOpts) {
      tty->print_cr("Split-If");
    }
    do_split_if(iff);
    C->print_method(PHASE_AFTER_SPLIT_IF, 4, iff);
    return;
  }

  // Two identical ifs back to back can be merged
  if (try_merge_identical_ifs(n)) {
    return;
  }

  // Check for an IF ready to split; one that has its
  // condition codes input coming from a Phi at the block start.
  int n_op = n->Opcode();

  // Check for an IF being dominated by another IF with the same test
  if (n_op == Op_If ||
      n_op == Op_RangeCheck) {
    Node* bol = n->in(1);
    uint max = bol->outcnt();
    // Check for same test used more than once?
    if (bol->is_Bool() && (max > 1 || bol->in(1)->is_SubTypeCheck())) {
      // Search up IDOMs to see if this IF is dominated.
      Node* cmp = bol->in(1);
      Node* cutoff = cmp->is_SubTypeCheck() ? dom_lca(get_ctrl(cmp->in(1)), get_ctrl(cmp->in(2))) : get_ctrl(bol);

      // Now search up IDOMs till cutoff, looking for a dominating test
      Node* prevdom = n;
      Node* dom = idom(prevdom);
      while (dom != cutoff) {
        if (dom->req() > 1 && n->as_If()->same_condition(dom, &_igvn) && prevdom->in(0) == dom &&
            safe_for_if_replacement(dom)) {
          // It's invalid to move control dependent data nodes into the inner
          // strip-mined loop, because:
          // 1) it breaks the validation done by LoopNode::verify_strip_mined()
          // 2) it may move code with side effects into the strip-mined loop
          // Move to the exit of the outer strip-mined loop in that case.
          Node* out_le = is_inner_of_stripmined_loop(dom);
          if (out_le != nullptr) {
            prevdom = out_le;
          }
          // Replace the dominated test with an obvious true or false.
          // Place it on the IGVN worklist for later cleanup.
          C->set_major_progress();
          // Split if: pin array accesses that are control dependent on a range check and moved to a regular if,
          // to prevent an array load from floating above its range check. There are three cases:
          // 1. Move from RangeCheck "a" to RangeCheck "b": don't need to pin. If we ever remove b, then we pin
          //    all its array accesses at that point.
          // 2. We move from RangeCheck "a" to regular if "b": need to pin. If we ever remove b, then its array
          //    accesses would start to float, since we don't pin at that point.
          // 3. If we move from regular if: don't pin. All array accesses are already assumed to be pinned.
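          // The check below selects exactly case 2: the dominated test is a RangeCheck while the
          // dominating test it is folded into is not.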
          bool pin_array_access_nodes = n->Opcode() == Op_RangeCheck &&
                                        prevdom->in(0)->Opcode() != Op_RangeCheck;
          dominated_by(prevdom->as_IfProj(), n->as_If(), false, pin_array_access_nodes);
          DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
          return;
        }
        prevdom = dom;
        dom = idom(prevdom);
      }
    }
  }

  try_sink_out_of_loop(n);
  if (C->failing()) {
    return;
  }

  try_move_store_after_loop(n);
}

// Transform:
//
// if (some_condition) {
//   // body 1
// } else {
//   // body 2
// }
// if (some_condition) {
//   // body 3
// } else {
//   // body 4
// }
//
// into:
//
// if (some_condition) {
//   // body 1
//   // body 3
// } else {
//   // body 2
//   // body 4
// }
bool PhaseIdealLoop::try_merge_identical_ifs(Node* n) {
  if (identical_backtoback_ifs(n) && can_split_if(n->in(0))) {
    Node* n_ctrl = n->in(0);
    IfNode* dom_if = idom(n_ctrl)->as_If();
    if (n->in(1) != dom_if->in(1)) {
      assert(n->in(1)->in(1)->is_SubTypeCheck() &&
             (n->in(1)->in(1)->as_SubTypeCheck()->method() != nullptr ||
              dom_if->in(1)->in(1)->as_SubTypeCheck()->method() != nullptr), "only for subtype checks with profile data attached");
      _igvn.replace_input_of(n, 1, dom_if->in(1));
    }
    ProjNode* dom_proj_true = dom_if->proj_out(1);
    ProjNode* dom_proj_false = dom_if->proj_out(0);

    // Now split the IF
    RegionNode* new_false_region;
    RegionNode* new_true_region;
    do_split_if(n, &new_false_region, &new_true_region);
    assert(new_false_region->req() == new_true_region->req(), "");
#ifdef ASSERT
    for (uint i = 1; i < new_false_region->req(); ++i) {
      assert(new_false_region->in(i)->in(0) == new_true_region->in(i)->in(0), "unexpected shape following split if");
      assert(i == new_false_region->req() - 1 || new_false_region->in(i)->in(0)->in(1) == new_false_region->in(i + 1)->in(0)->in(1), "unexpected shape following split if");
    }
#endif
    assert(new_false_region->in(1)->in(0)->in(1) == dom_if->in(1), "dominating if and dominated if after split must share test");

    // We now have:
    // if (some_condition) {
    //   // body 1
    //   if (some_condition) {
    //     body3:   // new_true_region
    //     // body3
    //   } else {
    //     goto body4;
    //   }
    // } else {
    //   // body 2
    //   if (some_condition) {
    //     goto body3;
    //   } else {
    //     body4:   // new_false_region
    //     // body4;
    //   }
    // }
    //

    // clone pinned nodes thru the resulting regions
    push_pinned_nodes_thru_region(dom_if, new_true_region);
    push_pinned_nodes_thru_region(dom_if, new_false_region);

    // Optimize out the cloned ifs. Because pinned nodes were cloned, this also allows a CastPP that would be dependent
    // on a projection of n to have the dom_if as a control dependency. We don't want the CastPP to end up with an
    // unrelated control dependency.
1634 for (uint i = 1; i < new_false_region->req(); i++) { 1635 if (is_dominator(dom_proj_true, new_false_region->in(i))) { 1636 dominated_by(dom_proj_true->as_IfProj(), new_false_region->in(i)->in(0)->as_If()); 1637 } else { 1638 assert(is_dominator(dom_proj_false, new_false_region->in(i)), "bad if"); 1639 dominated_by(dom_proj_false->as_IfProj(), new_false_region->in(i)->in(0)->as_If()); 1640 } 1641 } 1642 return true; 1643 } 1644 return false; 1645 } 1646 1647 void PhaseIdealLoop::push_pinned_nodes_thru_region(IfNode* dom_if, Node* region) { 1648 for (DUIterator i = region->outs(); region->has_out(i); i++) { 1649 Node* u = region->out(i); 1650 if (!has_ctrl(u) || u->is_Phi() || !u->depends_only_on_test() || !_igvn.no_dependent_zero_check(u)) { 1651 continue; 1652 } 1653 assert(u->in(0) == region, "not a control dependent node?"); 1654 uint j = 1; 1655 for (; j < u->req(); ++j) { 1656 Node* in = u->in(j); 1657 if (!is_dominator(ctrl_or_self(in), dom_if)) { 1658 break; 1659 } 1660 } 1661 if (j == u->req()) { 1662 Node *phi = PhiNode::make_blank(region, u); 1663 for (uint k = 1; k < region->req(); ++k) { 1664 Node* clone = u->clone(); 1665 clone->set_req(0, region->in(k)); 1666 register_new_node(clone, region->in(k)); 1667 phi->init_req(k, clone); 1668 } 1669 register_new_node(phi, region); 1670 _igvn.replace_node(u, phi); 1671 --i; 1672 } 1673 } 1674 } 1675 1676 bool PhaseIdealLoop::safe_for_if_replacement(const Node* dom) const { 1677 if (!dom->is_CountedLoopEnd()) { 1678 return true; 1679 } 1680 CountedLoopEndNode* le = dom->as_CountedLoopEnd(); 1681 CountedLoopNode* cl = le->loopnode(); 1682 if (cl == nullptr) { 1683 return true; 1684 } 1685 if (!cl->is_main_loop()) { 1686 return true; 1687 } 1688 if (cl->is_canonical_loop_entry() == nullptr) { 1689 return true; 1690 } 1691 // Further unrolling is possible so loop exit condition might change 1692 return false; 1693 } 1694 1695 // See if a shared loop-varying computation has no loop-varying uses. 1696 // Happens if something is only used for JVM state in uncommon trap exits, 1697 // like various versions of induction variable+offset. Clone the 1698 // computation per usage to allow it to sink out of the loop. 1699 void PhaseIdealLoop::try_sink_out_of_loop(Node* n) { 1700 if (has_ctrl(n) && 1701 !n->is_Phi() && 1702 !n->is_Bool() && 1703 !n->is_Proj() && 1704 !n->is_MergeMem() && 1705 !n->is_CMove() && 1706 !n->is_OpaqueNotNull() && 1707 !n->is_OpaqueInitializedAssertionPredicate() && 1708 !n->is_OpaqueTemplateAssertionPredicate() && 1709 !n->is_Type()) { 1710 Node *n_ctrl = get_ctrl(n); 1711 IdealLoopTree *n_loop = get_loop(n_ctrl); 1712 1713 if (n->in(0) != nullptr) { 1714 IdealLoopTree* loop_ctrl = get_loop(n->in(0)); 1715 if (n_loop != loop_ctrl && n_loop->is_member(loop_ctrl)) { 1716 // n has a control input inside a loop but get_ctrl() is member of an outer loop. This could happen, for example, 1717 // for Div nodes inside a loop (control input inside loop) without a use except for an UCT (outside the loop). 1718 // Rewire control of n to right outside of the loop, regardless if its input(s) are later sunk or not. 1719 Node* maybe_pinned_n = n; 1720 Node* outside_ctrl = place_outside_loop(n_ctrl, loop_ctrl); 1721 if (n->depends_only_on_test()) { 1722 Node* pinned_clone = n->pin_array_access_node(); 1723 if (pinned_clone != nullptr) { 1724 // Pin array access nodes: if this is an array load, it's going to be dependent on a condition that's not a 1725 // range check for that access. 
If that condition is replaced by an identical dominating one, then an 1726 // unpinned load would risk floating above its range check. 1727 register_new_node(pinned_clone, n_ctrl); 1728 maybe_pinned_n = pinned_clone; 1729 _igvn.replace_node(n, pinned_clone); 1730 } 1731 } 1732 _igvn.replace_input_of(maybe_pinned_n, 0, outside_ctrl); 1733 } 1734 } 1735 if (n_loop != _ltree_root && n->outcnt() > 1) { 1736 // Compute early control: needed for anti-dependence analysis. It's also possible that as a result of 1737 // previous transformations in this loop opts round, the node can be hoisted now: early control will tell us. 1738 Node* early_ctrl = compute_early_ctrl(n, n_ctrl); 1739 if (n_loop->is_member(get_loop(early_ctrl)) && // check that this one can't be hoisted now 1740 ctrl_of_all_uses_out_of_loop(n, early_ctrl, n_loop)) { // All uses in outer loops! 1741 if (n->is_Store() || n->is_LoadStore()) { 1742 assert(false, "no node with a side effect"); 1743 C->record_failure("no node with a side effect"); 1744 return; 1745 } 1746 Node* outer_loop_clone = nullptr; 1747 for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin;) { 1748 Node* u = n->last_out(j); // Clone private computation per use 1749 _igvn.rehash_node_delayed(u); 1750 Node* x = nullptr; 1751 if (n->depends_only_on_test()) { 1752 // Pin array access nodes: if this is an array load, it's going to be dependent on a condition that's not a 1753 // range check for that access. If that condition is replaced by an identical dominating one, then an 1754 // unpinned load would risk floating above its range check. 1755 x = n->pin_array_access_node(); 1756 } 1757 if (x == nullptr) { 1758 x = n->clone(); 1759 } 1760 Node* x_ctrl = nullptr; 1761 if (u->is_Phi()) { 1762 // Replace all uses of normal nodes. Replace Phi uses 1763 // individually, so the separate Nodes can sink down 1764 // different paths. 1765 uint k = 1; 1766 while (u->in(k) != n) k++; 1767 u->set_req(k, x); 1768 // x goes next to Phi input path 1769 x_ctrl = u->in(0)->in(k); 1770 // Find control for 'x' next to use but not inside inner loops. 1771 x_ctrl = place_outside_loop(x_ctrl, n_loop); 1772 --j; 1773 } else { // Normal use 1774 if (has_ctrl(u)) { 1775 x_ctrl = get_ctrl(u); 1776 } else { 1777 x_ctrl = u->in(0); 1778 } 1779 // Find control for 'x' next to use but not inside inner loops. 1780 x_ctrl = place_outside_loop(x_ctrl, n_loop); 1781 // Replace all uses 1782 if (u->is_ConstraintCast() && _igvn.type(n)->higher_equal(u->bottom_type()) && u->in(0) == x_ctrl) { 1783 // If we're sinking a chain of data nodes, we might have inserted a cast to pin the use which is not necessary 1784 // anymore now that we're going to pin n as well 1785 _igvn.replace_node(u, x); 1786 --j; 1787 } else { 1788 int nb = u->replace_edge(n, x, &_igvn); 1789 j -= nb; 1790 } 1791 } 1792 1793 if (n->is_Load()) { 1794 // For loads, add a control edge to a CFG node outside of the loop 1795 // to force them to not combine and return back inside the loop 1796 // during GVN optimization (4641526). 
1797 assert(x_ctrl == get_late_ctrl_with_anti_dep(x->as_Load(), early_ctrl, x_ctrl), "anti-dependences were already checked"); 1798 1799 IdealLoopTree* x_loop = get_loop(x_ctrl); 1800 Node* x_head = x_loop->_head; 1801 if (x_head->is_Loop() && x_head->is_OuterStripMinedLoop()) { 1802 // Do not add duplicate LoadNodes to the outer strip mined loop 1803 if (outer_loop_clone != nullptr) { 1804 _igvn.replace_node(x, outer_loop_clone); 1805 continue; 1806 } 1807 outer_loop_clone = x; 1808 } 1809 x->set_req(0, x_ctrl); 1810 } else if (n->in(0) != nullptr){ 1811 x->set_req(0, x_ctrl); 1812 } 1813 assert(dom_depth(n_ctrl) <= dom_depth(x_ctrl), "n is later than its clone"); 1814 assert(!n_loop->is_member(get_loop(x_ctrl)), "should have moved out of loop"); 1815 register_new_node(x, x_ctrl); 1816 1817 // Chain of AddP nodes: (AddP base (AddP base (AddP base ))) 1818 // All AddP nodes must keep the same base after sinking so: 1819 // 1- We don't add a CastPP here until the last one of the chain is sunk: if part of the chain is not sunk, 1820 // their bases remain the same. 1821 // (see 2- below) 1822 assert(!x->is_AddP() || !x->in(AddPNode::Address)->is_AddP() || 1823 x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base) || 1824 !x->in(AddPNode::Address)->in(AddPNode::Base)->eqv_uncast(x->in(AddPNode::Base)), "unexpected AddP shape"); 1825 if (x->in(0) == nullptr && !x->is_DecodeNarrowPtr() && 1826 !(x->is_AddP() && x->in(AddPNode::Address)->is_AddP() && x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base))) { 1827 assert(!x->is_Load(), "load should be pinned"); 1828 // Use a cast node to pin clone out of loop 1829 Node* cast = nullptr; 1830 for (uint k = 0; k < x->req(); k++) { 1831 Node* in = x->in(k); 1832 if (in != nullptr && n_loop->is_member(get_loop(get_ctrl(in)))) { 1833 const Type* in_t = _igvn.type(in); 1834 cast = ConstraintCastNode::make_cast_for_type(x_ctrl, in, in_t, 1835 ConstraintCastNode::UnconditionalDependency, nullptr); 1836 } 1837 if (cast != nullptr) { 1838 Node* prev = _igvn.hash_find_insert(cast); 1839 if (prev != nullptr && get_ctrl(prev) == x_ctrl) { 1840 cast->destruct(&_igvn); 1841 cast = prev; 1842 } else { 1843 register_new_node(cast, x_ctrl); 1844 } 1845 x->replace_edge(in, cast); 1846 // Chain of AddP nodes: 1847 // 2- A CastPP of the base is only added now that all AddP nodes are sunk 1848 if (x->is_AddP() && k == AddPNode::Base) { 1849 update_addp_chain_base(x, n->in(AddPNode::Base), cast); 1850 } 1851 break; 1852 } 1853 } 1854 assert(cast != nullptr, "must have added a cast to pin the node"); 1855 } 1856 } 1857 _igvn.remove_dead_node(n); 1858 } 1859 _dom_lca_tags_round = 0; 1860 } 1861 } 1862 } 1863 1864 void PhaseIdealLoop::update_addp_chain_base(Node* x, Node* old_base, Node* new_base) { 1865 ResourceMark rm; 1866 Node_List wq; 1867 wq.push(x); 1868 while (wq.size() != 0) { 1869 Node* n = wq.pop(); 1870 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1871 Node* u = n->fast_out(i); 1872 if (u->is_AddP() && u->in(AddPNode::Base) == old_base) { 1873 _igvn.replace_input_of(u, AddPNode::Base, new_base); 1874 wq.push(u); 1875 } 1876 } 1877 } 1878 } 1879 1880 // Compute the early control of a node by following its inputs until we reach 1881 // nodes that are pinned. Then compute the LCA of the control of all pinned nodes. 
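// For instance (informal): if n's transitive inputs reach two pinned nodes, one whose control is
// the loop entry and one whose control is the loop head, the deeper of the two controls (the loop
// head) is returned as the early control.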
1882 Node* PhaseIdealLoop::compute_early_ctrl(Node* n, Node* n_ctrl) { 1883 Node* early_ctrl = nullptr; 1884 ResourceMark rm; 1885 Unique_Node_List wq; 1886 wq.push(n); 1887 for (uint i = 0; i < wq.size(); i++) { 1888 Node* m = wq.at(i); 1889 Node* c = nullptr; 1890 if (m->is_CFG()) { 1891 c = m; 1892 } else if (m->pinned()) { 1893 c = m->in(0); 1894 } else { 1895 for (uint j = 0; j < m->req(); j++) { 1896 Node* in = m->in(j); 1897 if (in != nullptr) { 1898 wq.push(in); 1899 } 1900 } 1901 } 1902 if (c != nullptr) { 1903 assert(is_dominator(c, n_ctrl), "control input must dominate current control"); 1904 if (early_ctrl == nullptr || is_dominator(early_ctrl, c)) { 1905 early_ctrl = c; 1906 } 1907 } 1908 } 1909 assert(is_dominator(early_ctrl, n_ctrl), "early control must dominate current control"); 1910 return early_ctrl; 1911 } 1912 1913 bool PhaseIdealLoop::ctrl_of_all_uses_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop) { 1914 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1915 Node* u = n->fast_out(i); 1916 if (u->is_Opaque1()) { 1917 return false; // Found loop limit, bugfix for 4677003 1918 } 1919 // We can't reuse tags in PhaseIdealLoop::dom_lca_for_get_late_ctrl_internal() so make sure calls to 1920 // get_late_ctrl_with_anti_dep() use their own tag 1921 _dom_lca_tags_round++; 1922 assert(_dom_lca_tags_round != 0, "shouldn't wrap around"); 1923 1924 if (u->is_Phi()) { 1925 for (uint j = 1; j < u->req(); ++j) { 1926 if (u->in(j) == n && !ctrl_of_use_out_of_loop(n, n_ctrl, n_loop, u->in(0)->in(j))) { 1927 return false; 1928 } 1929 } 1930 } else { 1931 Node* ctrl = has_ctrl(u) ? get_ctrl(u) : u->in(0); 1932 if (!ctrl_of_use_out_of_loop(n, n_ctrl, n_loop, ctrl)) { 1933 return false; 1934 } 1935 } 1936 } 1937 return true; 1938 } 1939 1940 bool PhaseIdealLoop::ctrl_of_use_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop, Node* ctrl) { 1941 if (n->is_Load()) { 1942 ctrl = get_late_ctrl_with_anti_dep(n->as_Load(), n_ctrl, ctrl); 1943 } 1944 IdealLoopTree *u_loop = get_loop(ctrl); 1945 if (u_loop == n_loop) { 1946 return false; // Found loop-varying use 1947 } 1948 if (n_loop->is_member(u_loop)) { 1949 return false; // Found use in inner loop 1950 } 1951 // Sinking a node from a pre loop to its main loop pins the node between the pre and main loops. If that node is input 1952 // to a check that's eliminated by range check elimination, it becomes input to an expression that feeds into the exit 1953 // test of the pre loop above the point in the graph where it's pinned. 1954 if (n_loop->_head->is_CountedLoop() && n_loop->_head->as_CountedLoop()->is_pre_loop()) { 1955 CountedLoopNode* pre_loop = n_loop->_head->as_CountedLoop(); 1956 if (is_dominator(pre_loop->loopexit(), ctrl)) { 1957 return false; 1958 } 1959 } 1960 return true; 1961 } 1962 1963 //------------------------------split_if_with_blocks--------------------------- 1964 // Check for aggressive application of 'split-if' optimization, 1965 // using basic block level info. 
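// The walk below is an iterative depth-first traversal of the out-edges starting at the root:
// split_if_with_blocks_pre() is applied when a node is first reached and
// split_if_with_blocks_post() once all of its uses have been visited, with the explicit nstack
// standing in for recursion.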
1966 void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack) { 1967 Node* root = C->root(); 1968 visited.set(root->_idx); // first, mark root as visited 1969 // Do pre-visit work for root 1970 Node* n = split_if_with_blocks_pre(root); 1971 uint cnt = n->outcnt(); 1972 uint i = 0; 1973 1974 while (true) { 1975 // Visit all children 1976 if (i < cnt) { 1977 Node* use = n->raw_out(i); 1978 ++i; 1979 if (use->outcnt() != 0 && !visited.test_set(use->_idx)) { 1980 // Now do pre-visit work for this use 1981 use = split_if_with_blocks_pre(use); 1982 nstack.push(n, i); // Save parent and next use's index. 1983 n = use; // Process all children of current use. 1984 cnt = use->outcnt(); 1985 i = 0; 1986 } 1987 } 1988 else { 1989 // All of n's children have been processed, complete post-processing. 1990 if (cnt != 0 && !n->is_Con()) { 1991 assert(has_node(n), "no dead nodes"); 1992 split_if_with_blocks_post(n); 1993 if (C->failing()) { 1994 return; 1995 } 1996 } 1997 if (must_throttle_split_if()) { 1998 nstack.clear(); 1999 } 2000 if (nstack.is_empty()) { 2001 // Finished all nodes on stack. 2002 break; 2003 } 2004 // Get saved parent node and next use's index. Visit the rest of uses. 2005 n = nstack.node(); 2006 cnt = n->outcnt(); 2007 i = nstack.index(); 2008 nstack.pop(); 2009 } 2010 } 2011 } 2012 2013 2014 //============================================================================= 2015 // 2016 // C L O N E A L O O P B O D Y 2017 // 2018 2019 //------------------------------clone_iff-------------------------------------- 2020 // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps. 2021 // "Nearly" because all Nodes have been cloned from the original in the loop, 2022 // but the fall-in edges to the Cmp are different. Clone bool/Cmp pairs 2023 // through the Phi recursively, and return a Bool. 2024 Node* PhaseIdealLoop::clone_iff(PhiNode* phi) { 2025 2026 // Convert this Phi into a Phi merging Bools 2027 uint i; 2028 for (i = 1; i < phi->req(); i++) { 2029 Node* b = phi->in(i); 2030 if (b->is_Phi()) { 2031 _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi())); 2032 } else { 2033 assert(b->is_Bool() || b->is_OpaqueNotNull() || b->is_OpaqueInitializedAssertionPredicate(), 2034 "bool, non-null check with OpaqueNotNull or Initialized Assertion Predicate with its Opaque node"); 2035 } 2036 } 2037 Node* n = phi->in(1); 2038 Node* sample_opaque = nullptr; 2039 Node *sample_bool = nullptr; 2040 if (n->is_OpaqueNotNull() || n->is_OpaqueInitializedAssertionPredicate()) { 2041 sample_opaque = n; 2042 sample_bool = n->in(1); 2043 assert(sample_bool->is_Bool(), "wrong type"); 2044 } else { 2045 sample_bool = n; 2046 } 2047 Node *sample_cmp = sample_bool->in(1); 2048 2049 // Make Phis to merge the Cmp's inputs. 2050 PhiNode *phi1 = new PhiNode(phi->in(0), Type::TOP); 2051 PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP); 2052 for (i = 1; i < phi->req(); i++) { 2053 Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1); 2054 Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2); 2055 phi1->set_req(i, n1); 2056 phi2->set_req(i, n2); 2057 phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type())); 2058 phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type())); 2059 } 2060 // See if these Phis have been made before. 
2061 // Register with optimizer 2062 Node *hit1 = _igvn.hash_find_insert(phi1); 2063 if (hit1) { // Hit, toss just made Phi 2064 _igvn.remove_dead_node(phi1); // Remove new phi 2065 assert(hit1->is_Phi(), "" ); 2066 phi1 = (PhiNode*)hit1; // Use existing phi 2067 } else { // Miss 2068 _igvn.register_new_node_with_optimizer(phi1); 2069 } 2070 Node *hit2 = _igvn.hash_find_insert(phi2); 2071 if (hit2) { // Hit, toss just made Phi 2072 _igvn.remove_dead_node(phi2); // Remove new phi 2073 assert(hit2->is_Phi(), "" ); 2074 phi2 = (PhiNode*)hit2; // Use existing phi 2075 } else { // Miss 2076 _igvn.register_new_node_with_optimizer(phi2); 2077 } 2078 // Register Phis with loop/block info 2079 set_ctrl(phi1, phi->in(0)); 2080 set_ctrl(phi2, phi->in(0)); 2081 // Make a new Cmp 2082 Node *cmp = sample_cmp->clone(); 2083 cmp->set_req(1, phi1); 2084 cmp->set_req(2, phi2); 2085 _igvn.register_new_node_with_optimizer(cmp); 2086 set_ctrl(cmp, phi->in(0)); 2087 2088 // Make a new Bool 2089 Node *b = sample_bool->clone(); 2090 b->set_req(1,cmp); 2091 _igvn.register_new_node_with_optimizer(b); 2092 set_ctrl(b, phi->in(0)); 2093 2094 if (sample_opaque != nullptr) { 2095 Node* opaque = sample_opaque->clone(); 2096 opaque->set_req(1, b); 2097 _igvn.register_new_node_with_optimizer(opaque); 2098 set_ctrl(opaque, phi->in(0)); 2099 return opaque; 2100 } 2101 2102 assert(b->is_Bool(), ""); 2103 return b; 2104 } 2105 2106 //------------------------------clone_bool------------------------------------- 2107 // Passed in a Phi merging (recursively) some nearly equivalent Cmps. 2108 // "Nearly" because all Nodes have been cloned from the original in the loop, 2109 // but the fall-in edges to the Cmp are different. Clone the Cmps 2110 // through the Phi recursively, and return a Cmp. 2111 CmpNode* PhaseIdealLoop::clone_bool(PhiNode* phi) { 2112 uint i; 2113 // Convert this Phi into a Phi merging Cmps 2114 for( i = 1; i < phi->req(); i++ ) { 2115 Node *b = phi->in(i); 2116 if( b->is_Phi() ) { 2117 _igvn.replace_input_of(phi, i, clone_bool(b->as_Phi())); 2118 } else { 2119 assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" ); 2120 } 2121 } 2122 2123 Node *sample_cmp = phi->in(1); 2124 2125 // Make Phis to merge the Cmp's inputs. 2126 PhiNode *phi1 = new PhiNode( phi->in(0), Type::TOP ); 2127 PhiNode *phi2 = new PhiNode( phi->in(0), Type::TOP ); 2128 for( uint j = 1; j < phi->req(); j++ ) { 2129 Node *cmp_top = phi->in(j); // Inputs are all Cmp or TOP 2130 Node *n1, *n2; 2131 if( cmp_top->is_Cmp() ) { 2132 n1 = cmp_top->in(1); 2133 n2 = cmp_top->in(2); 2134 } else { 2135 n1 = n2 = cmp_top; 2136 } 2137 phi1->set_req( j, n1 ); 2138 phi2->set_req( j, n2 ); 2139 phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type())); 2140 phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type())); 2141 } 2142 2143 // See if these Phis have been made before. 
2144 // Register with optimizer 2145 Node *hit1 = _igvn.hash_find_insert(phi1); 2146 if( hit1 ) { // Hit, toss just made Phi 2147 _igvn.remove_dead_node(phi1); // Remove new phi 2148 assert( hit1->is_Phi(), "" ); 2149 phi1 = (PhiNode*)hit1; // Use existing phi 2150 } else { // Miss 2151 _igvn.register_new_node_with_optimizer(phi1); 2152 } 2153 Node *hit2 = _igvn.hash_find_insert(phi2); 2154 if( hit2 ) { // Hit, toss just made Phi 2155 _igvn.remove_dead_node(phi2); // Remove new phi 2156 assert( hit2->is_Phi(), "" ); 2157 phi2 = (PhiNode*)hit2; // Use existing phi 2158 } else { // Miss 2159 _igvn.register_new_node_with_optimizer(phi2); 2160 } 2161 // Register Phis with loop/block info 2162 set_ctrl(phi1, phi->in(0)); 2163 set_ctrl(phi2, phi->in(0)); 2164 // Make a new Cmp 2165 Node *cmp = sample_cmp->clone(); 2166 cmp->set_req( 1, phi1 ); 2167 cmp->set_req( 2, phi2 ); 2168 _igvn.register_new_node_with_optimizer(cmp); 2169 set_ctrl(cmp, phi->in(0)); 2170 2171 assert( cmp->is_Cmp(), "" ); 2172 return (CmpNode*)cmp; 2173 } 2174 2175 void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new, 2176 IdealLoopTree* loop, IdealLoopTree* outer_loop, 2177 Node_List*& split_if_set, Node_List*& split_bool_set, 2178 Node_List*& split_cex_set, Node_List& worklist, 2179 uint new_counter, CloneLoopMode mode) { 2180 Node* nnn = old_new[old->_idx]; 2181 // Copy uses to a worklist, so I can munge the def-use info 2182 // with impunity. 2183 for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++) 2184 worklist.push(old->fast_out(j)); 2185 2186 while( worklist.size() ) { 2187 Node *use = worklist.pop(); 2188 if (!has_node(use)) continue; // Ignore dead nodes 2189 if (use->in(0) == C->top()) continue; 2190 IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use ); 2191 // Check for data-use outside of loop - at least one of OLD or USE 2192 // must not be a CFG node. 2193 #ifdef ASSERT 2194 if (loop->_head->as_Loop()->is_strip_mined() && outer_loop->is_member(use_loop) && !loop->is_member(use_loop) && old_new[use->_idx] == nullptr) { 2195 Node* sfpt = loop->_head->as_CountedLoop()->outer_safepoint(); 2196 assert(mode != IgnoreStripMined, "incorrect cloning mode"); 2197 assert((mode == ControlAroundStripMined && use == sfpt) || !use->is_reachable_from_root(), "missed a node"); 2198 } 2199 #endif 2200 if (!loop->is_member(use_loop) && !outer_loop->is_member(use_loop) && (!old->is_CFG() || !use->is_CFG())) { 2201 2202 // If the Data use is an IF, that means we have an IF outside the 2203 // loop that is switching on a condition that is set inside the 2204 // loop. Happens if people set a loop-exit flag; then test the flag 2205 // in the loop to break the loop, then test is again outside the 2206 // loop to determine which way the loop exited. 2207 // 2208 // For several uses we need to make sure that there is no phi between, 2209 // the use and the Bool/Cmp. We therefore clone the Bool/Cmp down here 2210 // to avoid such a phi in between. 2211 // For example, it is unexpected that there is a Phi between an 2212 // AllocateArray node and its ValidLengthTest input that could cause 2213 // split if to break. 
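      // The code below collects such uses on lazily allocated worklists (split_if_set,
      // split_bool_set, split_cex_set); they are split up later in finish_clone_loop().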
2214 assert(!use->is_OpaqueTemplateAssertionPredicate(), 2215 "should not clone a Template Assertion Predicate which should be removed once it's useless"); 2216 if (use->is_If() || use->is_CMove() || use->is_OpaqueNotNull() || use->is_OpaqueInitializedAssertionPredicate() || 2217 (use->Opcode() == Op_AllocateArray && use->in(AllocateNode::ValidLengthTest) == old)) { 2218 // Since this code is highly unlikely, we lazily build the worklist 2219 // of such Nodes to go split. 2220 if (!split_if_set) { 2221 split_if_set = new Node_List(); 2222 } 2223 split_if_set->push(use); 2224 } 2225 if (use->is_Bool()) { 2226 if (!split_bool_set) { 2227 split_bool_set = new Node_List(); 2228 } 2229 split_bool_set->push(use); 2230 } 2231 if (use->Opcode() == Op_CreateEx) { 2232 if (!split_cex_set) { 2233 split_cex_set = new Node_List(); 2234 } 2235 split_cex_set->push(use); 2236 } 2237 2238 2239 // Get "block" use is in 2240 uint idx = 0; 2241 while( use->in(idx) != old ) idx++; 2242 Node *prev = use->is_CFG() ? use : get_ctrl(use); 2243 assert(!loop->is_member(get_loop(prev)) && !outer_loop->is_member(get_loop(prev)), "" ); 2244 Node* cfg = (prev->_idx >= new_counter && prev->is_Region()) 2245 ? prev->in(2) 2246 : idom(prev); 2247 if( use->is_Phi() ) // Phi use is in prior block 2248 cfg = prev->in(idx); // NOT in block of Phi itself 2249 if (cfg->is_top()) { // Use is dead? 2250 _igvn.replace_input_of(use, idx, C->top()); 2251 continue; 2252 } 2253 2254 // If use is referenced through control edge... (idx == 0) 2255 if (mode == IgnoreStripMined && idx == 0) { 2256 LoopNode *head = loop->_head->as_Loop(); 2257 if (head->is_strip_mined() && is_dominator(head->outer_loop_exit(), prev)) { 2258 // That node is outside the inner loop, leave it outside the 2259 // outer loop as well to not confuse verification code. 2260 assert(!loop->_parent->is_member(use_loop), "should be out of the outer loop"); 2261 _igvn.replace_input_of(use, 0, head->outer_loop_exit()); 2262 continue; 2263 } 2264 } 2265 2266 while(!outer_loop->is_member(get_loop(cfg))) { 2267 prev = cfg; 2268 cfg = (cfg->_idx >= new_counter && cfg->is_Region()) ? cfg->in(2) : idom(cfg); 2269 } 2270 // If the use occurs after merging several exits from the loop, then 2271 // old value must have dominated all those exits. Since the same old 2272 // value was used on all those exits we did not need a Phi at this 2273 // merge point. NOW we do need a Phi here. Each loop exit value 2274 // is now merged with the peeled body exit; each exit gets its own 2275 // private Phi and those Phis need to be merged here. 2276 Node *phi; 2277 if( prev->is_Region() ) { 2278 if( idx == 0 ) { // Updating control edge? 2279 phi = prev; // Just use existing control 2280 } else { // Else need a new Phi 2281 phi = PhiNode::make( prev, old ); 2282 // Now recursively fix up the new uses of old! 2283 for( uint i = 1; i < prev->req(); i++ ) { 2284 worklist.push(phi); // Onto worklist once for each 'old' input 2285 } 2286 } 2287 } else { 2288 // Get new RegionNode merging old and new loop exits 2289 prev = old_new[prev->_idx]; 2290 assert( prev, "just made this in step 7" ); 2291 if( idx == 0) { // Updating control edge? 
2292 phi = prev; // Just use existing control 2293 } else { // Else need a new Phi 2294 // Make a new Phi merging data values properly 2295 phi = PhiNode::make( prev, old ); 2296 phi->set_req( 1, nnn ); 2297 } 2298 } 2299 // If inserting a new Phi, check for prior hits 2300 if( idx != 0 ) { 2301 Node *hit = _igvn.hash_find_insert(phi); 2302 if( hit == nullptr ) { 2303 _igvn.register_new_node_with_optimizer(phi); // Register new phi 2304 } else { // or 2305 // Remove the new phi from the graph and use the hit 2306 _igvn.remove_dead_node(phi); 2307 phi = hit; // Use existing phi 2308 } 2309 set_ctrl(phi, prev); 2310 } 2311 // Make 'use' use the Phi instead of the old loop body exit value 2312 assert(use->in(idx) == old, "old is still input of use"); 2313 // We notify all uses of old, including use, and the indirect uses, 2314 // that may now be optimized because we have replaced old with phi. 2315 _igvn.add_users_to_worklist(old); 2316 if (idx == 0 && 2317 use->depends_only_on_test()) { 2318 Node* pinned_clone = use->pin_array_access_node(); 2319 if (pinned_clone != nullptr) { 2320 // Pin array access nodes: control is updated here to a region. If, after some transformations, only one path 2321 // into the region is left, an array load could become dependent on a condition that's not a range check for 2322 // that access. If that condition is replaced by an identical dominating one, then an unpinned load would risk 2323 // floating above its range check. 2324 pinned_clone->set_req(0, phi); 2325 register_new_node_with_ctrl_of(pinned_clone, use); 2326 _igvn.replace_node(use, pinned_clone); 2327 continue; 2328 } 2329 } 2330 _igvn.replace_input_of(use, idx, phi); 2331 if( use->_idx >= new_counter ) { // If updating new phis 2332 // Not needed for correctness, but prevents a weak assert 2333 // in AddPNode from tripping (when we end up with different 2334 // base & derived Phis that will become the same after 2335 // IGVN does CSE). 2336 Node *hit = _igvn.hash_find_insert(use); 2337 if( hit ) // Go ahead and re-hash for hits. 
2338 _igvn.replace_node( use, hit ); 2339 } 2340 } 2341 } 2342 } 2343 2344 static void collect_nodes_in_outer_loop_not_reachable_from_sfpt(Node* n, const IdealLoopTree *loop, const IdealLoopTree* outer_loop, 2345 const Node_List &old_new, Unique_Node_List& wq, PhaseIdealLoop* phase, 2346 bool check_old_new) { 2347 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 2348 Node* u = n->fast_out(j); 2349 assert(check_old_new || old_new[u->_idx] == nullptr, "shouldn't have been cloned"); 2350 if (!u->is_CFG() && (!check_old_new || old_new[u->_idx] == nullptr)) { 2351 Node* c = phase->get_ctrl(u); 2352 IdealLoopTree* u_loop = phase->get_loop(c); 2353 assert(!loop->is_member(u_loop) || !loop->_body.contains(u), "can be in outer loop or out of both loops only"); 2354 if (!loop->is_member(u_loop)) { 2355 if (outer_loop->is_member(u_loop)) { 2356 wq.push(u); 2357 } else { 2358 // nodes pinned with control in the outer loop but not referenced from the safepoint must be moved out of 2359 // the outer loop too 2360 Node* u_c = u->in(0); 2361 if (u_c != nullptr) { 2362 IdealLoopTree* u_c_loop = phase->get_loop(u_c); 2363 if (outer_loop->is_member(u_c_loop) && !loop->is_member(u_c_loop)) { 2364 wq.push(u); 2365 } 2366 } 2367 } 2368 } 2369 } 2370 } 2371 } 2372 2373 void PhaseIdealLoop::clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop, 2374 IdealLoopTree* outer_loop, int dd, Node_List &old_new, 2375 Node_List& extra_data_nodes) { 2376 if (head->is_strip_mined() && mode != IgnoreStripMined) { 2377 CountedLoopNode* cl = head->as_CountedLoop(); 2378 Node* l = cl->outer_loop(); 2379 Node* tail = cl->outer_loop_tail(); 2380 IfNode* le = cl->outer_loop_end(); 2381 Node* sfpt = cl->outer_safepoint(); 2382 CountedLoopEndNode* cle = cl->loopexit(); 2383 CountedLoopNode* new_cl = old_new[cl->_idx]->as_CountedLoop(); 2384 CountedLoopEndNode* new_cle = new_cl->as_CountedLoop()->loopexit_or_null(); 2385 Node* cle_out = cle->proj_out(false); 2386 2387 Node* new_sfpt = nullptr; 2388 Node* new_cle_out = cle_out->clone(); 2389 old_new.map(cle_out->_idx, new_cle_out); 2390 if (mode == CloneIncludesStripMined) { 2391 // clone outer loop body 2392 Node* new_l = l->clone(); 2393 Node* new_tail = tail->clone(); 2394 IfNode* new_le = le->clone()->as_If(); 2395 new_sfpt = sfpt->clone(); 2396 2397 set_loop(new_l, outer_loop->_parent); 2398 set_idom(new_l, new_l->in(LoopNode::EntryControl), dd); 2399 set_loop(new_cle_out, outer_loop->_parent); 2400 set_idom(new_cle_out, new_cle, dd); 2401 set_loop(new_sfpt, outer_loop->_parent); 2402 set_idom(new_sfpt, new_cle_out, dd); 2403 set_loop(new_le, outer_loop->_parent); 2404 set_idom(new_le, new_sfpt, dd); 2405 set_loop(new_tail, outer_loop->_parent); 2406 set_idom(new_tail, new_le, dd); 2407 set_idom(new_cl, new_l, dd); 2408 2409 old_new.map(l->_idx, new_l); 2410 old_new.map(tail->_idx, new_tail); 2411 old_new.map(le->_idx, new_le); 2412 old_new.map(sfpt->_idx, new_sfpt); 2413 2414 new_l->set_req(LoopNode::LoopBackControl, new_tail); 2415 new_l->set_req(0, new_l); 2416 new_tail->set_req(0, new_le); 2417 new_le->set_req(0, new_sfpt); 2418 new_sfpt->set_req(0, new_cle_out); 2419 new_cle_out->set_req(0, new_cle); 2420 new_cl->set_req(LoopNode::EntryControl, new_l); 2421 2422 _igvn.register_new_node_with_optimizer(new_l); 2423 _igvn.register_new_node_with_optimizer(new_tail); 2424 _igvn.register_new_node_with_optimizer(new_le); 2425 } else { 2426 Node *newhead = old_new[loop->_head->_idx]; 2427 newhead->as_Loop()->clear_strip_mined(); 2428 
_igvn.replace_input_of(newhead, LoopNode::EntryControl, newhead->in(LoopNode::EntryControl)->in(LoopNode::EntryControl)); 2429 set_idom(newhead, newhead->in(LoopNode::EntryControl), dd); 2430 } 2431 // Look at data node that were assigned a control in the outer 2432 // loop: they are kept in the outer loop by the safepoint so start 2433 // from the safepoint node's inputs. 2434 IdealLoopTree* outer_loop = get_loop(l); 2435 Node_Stack stack(2); 2436 stack.push(sfpt, 1); 2437 uint new_counter = C->unique(); 2438 while (stack.size() > 0) { 2439 Node* n = stack.node(); 2440 uint i = stack.index(); 2441 while (i < n->req() && 2442 (n->in(i) == nullptr || 2443 !has_ctrl(n->in(i)) || 2444 get_loop(get_ctrl(n->in(i))) != outer_loop || 2445 (old_new[n->in(i)->_idx] != nullptr && old_new[n->in(i)->_idx]->_idx >= new_counter))) { 2446 i++; 2447 } 2448 if (i < n->req()) { 2449 stack.set_index(i+1); 2450 stack.push(n->in(i), 0); 2451 } else { 2452 assert(old_new[n->_idx] == nullptr || n == sfpt || old_new[n->_idx]->_idx < new_counter, "no clone yet"); 2453 Node* m = n == sfpt ? new_sfpt : n->clone(); 2454 if (m != nullptr) { 2455 for (uint i = 0; i < n->req(); i++) { 2456 if (m->in(i) != nullptr && old_new[m->in(i)->_idx] != nullptr) { 2457 m->set_req(i, old_new[m->in(i)->_idx]); 2458 } 2459 } 2460 } else { 2461 assert(n == sfpt && mode != CloneIncludesStripMined, "where's the safepoint clone?"); 2462 } 2463 if (n != sfpt) { 2464 extra_data_nodes.push(n); 2465 _igvn.register_new_node_with_optimizer(m); 2466 assert(get_ctrl(n) == cle_out, "what other control?"); 2467 set_ctrl(m, new_cle_out); 2468 old_new.map(n->_idx, m); 2469 } 2470 stack.pop(); 2471 } 2472 } 2473 if (mode == CloneIncludesStripMined) { 2474 _igvn.register_new_node_with_optimizer(new_sfpt); 2475 _igvn.register_new_node_with_optimizer(new_cle_out); 2476 } 2477 // Some other transformation may have pessimistically assigned some 2478 // data nodes to the outer loop. Set their control so they are out 2479 // of the outer loop. 2480 ResourceMark rm; 2481 Unique_Node_List wq; 2482 for (uint i = 0; i < extra_data_nodes.size(); i++) { 2483 Node* old = extra_data_nodes.at(i); 2484 collect_nodes_in_outer_loop_not_reachable_from_sfpt(old, loop, outer_loop, old_new, wq, this, true); 2485 } 2486 2487 for (uint i = 0; i < loop->_body.size(); i++) { 2488 Node* old = loop->_body.at(i); 2489 collect_nodes_in_outer_loop_not_reachable_from_sfpt(old, loop, outer_loop, old_new, wq, this, true); 2490 } 2491 2492 Node* inner_out = sfpt->in(0); 2493 if (inner_out->outcnt() > 1) { 2494 collect_nodes_in_outer_loop_not_reachable_from_sfpt(inner_out, loop, outer_loop, old_new, wq, this, true); 2495 } 2496 2497 Node* new_ctrl = cl->outer_loop_exit(); 2498 assert(get_loop(new_ctrl) != outer_loop, "must be out of the loop nest"); 2499 for (uint i = 0; i < wq.size(); i++) { 2500 Node* n = wq.at(i); 2501 set_ctrl(n, new_ctrl); 2502 if (n->in(0) != nullptr) { 2503 _igvn.replace_input_of(n, 0, new_ctrl); 2504 } 2505 collect_nodes_in_outer_loop_not_reachable_from_sfpt(n, loop, outer_loop, old_new, wq, this, false); 2506 } 2507 } else { 2508 Node *newhead = old_new[loop->_head->_idx]; 2509 set_idom(newhead, newhead->in(LoopNode::EntryControl), dd); 2510 } 2511 } 2512 2513 //------------------------------clone_loop------------------------------------- 2514 // 2515 // C L O N E A L O O P B O D Y 2516 // 2517 // This is the basic building block of the loop optimizations. It clones an 2518 // entire loop body. 
It makes an old_new loop body mapping; with this mapping 2519 // you can find the new-loop equivalent to an old-loop node. All new-loop 2520 // nodes are exactly equal to their old-loop counterparts, all edges are the 2521 // same. All exits from the old-loop now have a RegionNode that merges the 2522 // equivalent new-loop path. This is true even for the normal "loop-exit" 2523 // condition. All uses of loop-invariant old-loop values now come from (one 2524 // or more) Phis that merge their new-loop equivalents. 2525 // 2526 // This operation leaves the graph in an illegal state: there are two valid 2527 // control edges coming from the loop pre-header to both loop bodies. I'll 2528 // definitely have to hack the graph after running this transform. 2529 // 2530 // From this building block I will further edit edges to perform loop peeling 2531 // or loop unrolling or iteration splitting (Range-Check-Elimination), etc. 2532 // 2533 // Parameter side_by_side_idom: 2534 // When side_by_side_idom is null, the dominator tree is constructed for 2535 // the clone loop to dominate the original. Used in construction of 2536 // pre-main-post loop sequence. 2537 // When nonnull, the clone and original are side-by-side, both are 2538 // dominated by the side_by_side_idom node. Used in construction of 2539 // unswitched loops. 2540 void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd, 2541 CloneLoopMode mode, Node* side_by_side_idom) { 2542 2543 LoopNode* head = loop->_head->as_Loop(); 2544 head->verify_strip_mined(1); 2545 2546 if (C->do_vector_loop() && PrintOpto) { 2547 const char* mname = C->method()->name()->as_quoted_ascii(); 2548 if (mname != nullptr) { 2549 tty->print("PhaseIdealLoop::clone_loop: for vectorize method %s\n", mname); 2550 } 2551 } 2552 2553 CloneMap& cm = C->clone_map(); 2554 if (C->do_vector_loop()) { 2555 cm.set_clone_idx(cm.max_gen()+1); 2556 #ifndef PRODUCT 2557 if (PrintOpto) { 2558 tty->print_cr("PhaseIdealLoop::clone_loop: _clone_idx %d", cm.clone_idx()); 2559 loop->dump_head(); 2560 } 2561 #endif 2562 } 2563 2564 // Step 1: Clone the loop body. Make the old->new mapping. 2565 clone_loop_body(loop->_body, old_new, &cm); 2566 2567 IdealLoopTree* outer_loop = (head->is_strip_mined() && mode != IgnoreStripMined) ? get_loop(head->as_CountedLoop()->outer_loop()) : loop; 2568 2569 // Step 2: Fix the edges in the new body. If the old input is outside the 2570 // loop use it. If the old input is INside the loop, use the corresponding 2571 // new node instead. 2572 fix_body_edges(loop->_body, loop, old_new, dd, outer_loop->_parent, false); 2573 2574 Node_List extra_data_nodes; // data nodes in the outer strip mined loop 2575 clone_outer_loop(head, mode, loop, outer_loop, dd, old_new, extra_data_nodes); 2576 2577 // Step 3: Now fix control uses. Loop varying control uses have already 2578 // been fixed up (as part of all input edges in Step 2). Loop invariant 2579 // control uses must be either an IfFalse or an IfTrue. Make a merge 2580 // point to merge the old and new IfFalse/IfTrue nodes; make the use 2581 // refer to this. 2582 Node_List worklist; 2583 uint new_counter = C->unique(); 2584 fix_ctrl_uses(loop->_body, loop, old_new, mode, side_by_side_idom, &cm, worklist); 2585 2586 // Step 4: If loop-invariant use is not control, it must be dominated by a 2587 // loop exit IfFalse/IfTrue. Find "proper" loop exit. Make a Region 2588 // there if needed. Make a Phi there merging old and new used values. 
2589 Node_List *split_if_set = nullptr; 2590 Node_List *split_bool_set = nullptr; 2591 Node_List *split_cex_set = nullptr; 2592 fix_data_uses(loop->_body, loop, mode, outer_loop, new_counter, old_new, worklist, split_if_set, split_bool_set, split_cex_set); 2593 2594 for (uint i = 0; i < extra_data_nodes.size(); i++) { 2595 Node* old = extra_data_nodes.at(i); 2596 clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set, 2597 split_bool_set, split_cex_set, worklist, new_counter, 2598 mode); 2599 } 2600 2601 // Check for IFs that need splitting/cloning. Happens if an IF outside of 2602 // the loop uses a condition set in the loop. The original IF probably 2603 // takes control from one or more OLD Regions (which in turn get input from NEW 2604 // Regions). In any case, there will be a set of Phis for each merge point 2605 // from the IF up to where the original BOOL def exits the loop. 2606 finish_clone_loop(split_if_set, split_bool_set, split_cex_set); 2607 2608 } 2609 2610 void PhaseIdealLoop::finish_clone_loop(Node_List* split_if_set, Node_List* split_bool_set, Node_List* split_cex_set) { 2611 if (split_if_set) { 2612 while (split_if_set->size()) { 2613 Node *iff = split_if_set->pop(); 2614 uint input = iff->Opcode() == Op_AllocateArray ? AllocateNode::ValidLengthTest : 1; 2615 if (iff->in(input)->is_Phi()) { 2616 Node *b = clone_iff(iff->in(input)->as_Phi()); 2617 _igvn.replace_input_of(iff, input, b); 2618 } 2619 } 2620 } 2621 if (split_bool_set) { 2622 while (split_bool_set->size()) { 2623 Node *b = split_bool_set->pop(); 2624 Node *phi = b->in(1); 2625 assert(phi->is_Phi(), ""); 2626 CmpNode *cmp = clone_bool((PhiNode*) phi); 2627 _igvn.replace_input_of(b, 1, cmp); 2628 } 2629 } 2630 if (split_cex_set) { 2631 while (split_cex_set->size()) { 2632 Node *b = split_cex_set->pop(); 2633 assert(b->in(0)->is_Region(), ""); 2634 assert(b->in(1)->is_Phi(), ""); 2635 assert(b->in(0)->in(0) == b->in(1)->in(0), ""); 2636 split_up(b, b->in(0), nullptr); 2637 } 2638 } 2639 } 2640 2641 void PhaseIdealLoop::fix_data_uses(Node_List& body, IdealLoopTree* loop, CloneLoopMode mode, IdealLoopTree* outer_loop, 2642 uint new_counter, Node_List &old_new, Node_List &worklist, Node_List*& split_if_set, 2643 Node_List*& split_bool_set, Node_List*& split_cex_set) { 2644 for(uint i = 0; i < body.size(); i++ ) { 2645 Node* old = body.at(i); 2646 clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set, 2647 split_bool_set, split_cex_set, worklist, new_counter, 2648 mode); 2649 } 2650 } 2651 2652 void PhaseIdealLoop::fix_ctrl_uses(const Node_List& body, const IdealLoopTree* loop, Node_List &old_new, CloneLoopMode mode, 2653 Node* side_by_side_idom, CloneMap* cm, Node_List &worklist) { 2654 LoopNode* head = loop->_head->as_Loop(); 2655 for(uint i = 0; i < body.size(); i++ ) { 2656 Node* old = body.at(i); 2657 if( !old->is_CFG() ) continue; 2658 2659 // Copy uses to a worklist, so I can munge the def-use info 2660 // with impunity. 2661 for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++) { 2662 worklist.push(old->fast_out(j)); 2663 } 2664 2665 while (worklist.size()) { // Visit all uses 2666 Node *use = worklist.pop(); 2667 if (!has_node(use)) continue; // Ignore dead nodes 2668 IdealLoopTree *use_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use ); 2669 if (!loop->is_member(use_loop) && use->is_CFG()) { 2670 // Both OLD and USE are CFG nodes here. 
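        // 'use' is a loop-exit projection: it is cloned for the new loop body below and a
        // RegionNode is created to merge the exits of the old and new bodies.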
2671 assert(use->is_Proj(), "" ); 2672 Node* nnn = old_new[old->_idx]; 2673 2674 Node* newuse = nullptr; 2675 if (head->is_strip_mined() && mode != IgnoreStripMined) { 2676 CountedLoopNode* cl = head->as_CountedLoop(); 2677 CountedLoopEndNode* cle = cl->loopexit(); 2678 Node* cle_out = cle->proj_out_or_null(false); 2679 if (use == cle_out) { 2680 IfNode* le = cl->outer_loop_end(); 2681 use = le->proj_out(false); 2682 use_loop = get_loop(use); 2683 if (mode == CloneIncludesStripMined) { 2684 nnn = old_new[le->_idx]; 2685 } else { 2686 newuse = old_new[cle_out->_idx]; 2687 } 2688 } 2689 } 2690 if (newuse == nullptr) { 2691 newuse = use->clone(); 2692 } 2693 2694 // Clone the loop exit control projection 2695 if (C->do_vector_loop() && cm != nullptr) { 2696 cm->verify_insert_and_clone(use, newuse, cm->clone_idx()); 2697 } 2698 newuse->set_req(0,nnn); 2699 _igvn.register_new_node_with_optimizer(newuse); 2700 set_loop(newuse, use_loop); 2701 set_idom(newuse, nnn, dom_depth(nnn) + 1 ); 2702 2703 // We need a Region to merge the exit from the peeled body and the 2704 // exit from the old loop body. 2705 RegionNode *r = new RegionNode(3); 2706 uint dd_r = MIN2(dom_depth(newuse), dom_depth(use)); 2707 assert(dd_r >= dom_depth(dom_lca(newuse, use)), "" ); 2708 2709 // The original user of 'use' uses 'r' instead. 2710 for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) { 2711 Node* useuse = use->last_out(l); 2712 _igvn.rehash_node_delayed(useuse); 2713 uint uses_found = 0; 2714 if (useuse->in(0) == use) { 2715 useuse->set_req(0, r); 2716 uses_found++; 2717 if (useuse->is_CFG()) { 2718 // This is not a dom_depth > dd_r because when new 2719 // control flow is constructed by a loop opt, a node and 2720 // its dominator can end up at the same dom_depth 2721 assert(dom_depth(useuse) >= dd_r, ""); 2722 set_idom(useuse, r, dom_depth(useuse)); 2723 } 2724 } 2725 for (uint k = 1; k < useuse->req(); k++) { 2726 if( useuse->in(k) == use ) { 2727 useuse->set_req(k, r); 2728 uses_found++; 2729 if (useuse->is_Loop() && k == LoopNode::EntryControl) { 2730 // This is not a dom_depth > dd_r because when new 2731 // control flow is constructed by a loop opt, a node 2732 // and its dominator can end up at the same dom_depth 2733 assert(dom_depth(useuse) >= dd_r , ""); 2734 set_idom(useuse, r, dom_depth(useuse)); 2735 } 2736 } 2737 } 2738 l -= uses_found; // we deleted 1 or more copies of this edge 2739 } 2740 2741 assert(use->is_Proj(), "loop exit should be projection"); 2742 // lazy_replace() below moves all nodes that are: 2743 // - control dependent on the loop exit or 2744 // - have control set to the loop exit 2745 // below the post-loop merge point. lazy_replace() takes a dead control as first input. To make it 2746 // possible to use it, the loop exit projection is cloned and becomes the new exit projection. The initial one 2747 // becomes dead and is "replaced" by the region. 2748 Node* use_clone = use->clone(); 2749 register_control(use_clone, use_loop, idom(use), dom_depth(use)); 2750 // Now finish up 'r' 2751 r->set_req(1, newuse); 2752 r->set_req(2, use_clone); 2753 _igvn.register_new_node_with_optimizer(r); 2754 set_loop(r, use_loop); 2755 set_idom(r, (side_by_side_idom == nullptr) ? 
newuse->in(0) : side_by_side_idom, dd_r); 2756 lazy_replace(use, r); 2757 // Map the (cloned) old use to the new merge point 2758 old_new.map(use_clone->_idx, r); 2759 } // End of if a loop-exit test 2760 } 2761 } 2762 } 2763 2764 void PhaseIdealLoop::fix_body_edges(const Node_List &body, IdealLoopTree* loop, const Node_List &old_new, int dd, 2765 IdealLoopTree* parent, bool partial) { 2766 for(uint i = 0; i < body.size(); i++ ) { 2767 Node *old = body.at(i); 2768 Node *nnn = old_new[old->_idx]; 2769 // Fix CFG/Loop controlling the new node 2770 if (has_ctrl(old)) { 2771 set_ctrl(nnn, old_new[get_ctrl(old)->_idx]); 2772 } else { 2773 set_loop(nnn, parent); 2774 if (old->outcnt() > 0) { 2775 Node* dom = idom(old); 2776 if (old_new[dom->_idx] != nullptr) { 2777 dom = old_new[dom->_idx]; 2778 set_idom(nnn, dom, dd ); 2779 } 2780 } 2781 } 2782 // Correct edges to the new node 2783 for (uint j = 0; j < nnn->req(); j++) { 2784 Node *n = nnn->in(j); 2785 if (n != nullptr) { 2786 IdealLoopTree *old_in_loop = get_loop(has_ctrl(n) ? get_ctrl(n) : n); 2787 if (loop->is_member(old_in_loop)) { 2788 if (old_new[n->_idx] != nullptr) { 2789 nnn->set_req(j, old_new[n->_idx]); 2790 } else { 2791 assert(!body.contains(n), ""); 2792 assert(partial, "node not cloned"); 2793 } 2794 } 2795 } 2796 } 2797 _igvn.hash_find_insert(nnn); 2798 } 2799 } 2800 2801 void PhaseIdealLoop::clone_loop_body(const Node_List& body, Node_List &old_new, CloneMap* cm) { 2802 for (uint i = 0; i < body.size(); i++) { 2803 Node* old = body.at(i); 2804 Node* nnn = old->clone(); 2805 old_new.map(old->_idx, nnn); 2806 if (C->do_vector_loop() && cm != nullptr) { 2807 cm->verify_insert_and_clone(old, nnn, cm->clone_idx()); 2808 } 2809 _igvn.register_new_node_with_optimizer(nnn); 2810 } 2811 } 2812 2813 2814 //---------------------- stride_of_possible_iv ------------------------------------- 2815 // Looks for an iff/bool/comp with one operand of the compare 2816 // being a cycle involving an add and a phi, 2817 // with an optional truncation (left-shift followed by a right-shift) 2818 // of the add. Returns zero if not an iv. 2819 int PhaseIdealLoop::stride_of_possible_iv(Node* iff) { 2820 Node* trunc1 = nullptr; 2821 Node* trunc2 = nullptr; 2822 const TypeInteger* ttype = nullptr; 2823 if (!iff->is_If() || iff->in(1) == nullptr || !iff->in(1)->is_Bool()) { 2824 return 0; 2825 } 2826 BoolNode* bl = iff->in(1)->as_Bool(); 2827 Node* cmp = bl->in(1); 2828 if (!cmp || (cmp->Opcode() != Op_CmpI && cmp->Opcode() != Op_CmpU)) { 2829 return 0; 2830 } 2831 // Must have an invariant operand 2832 if (is_member(get_loop(iff), get_ctrl(cmp->in(2)))) { 2833 return 0; 2834 } 2835 Node* add2 = nullptr; 2836 Node* cmp1 = cmp->in(1); 2837 if (cmp1->is_Phi()) { 2838 // (If (Bool (CmpX phi:(Phi ...(Optional-trunc(AddI phi add2))) ))) 2839 Node* phi = cmp1; 2840 for (uint i = 1; i < phi->req(); i++) { 2841 Node* in = phi->in(i); 2842 Node* add = CountedLoopNode::match_incr_with_optional_truncation(in, 2843 &trunc1, &trunc2, &ttype, T_INT); 2844 if (add && add->in(1) == phi) { 2845 add2 = add->in(2); 2846 break; 2847 } 2848 } 2849 } else { 2850 // (If (Bool (CmpX addtrunc:(Optional-trunc((AddI (Phi ...addtrunc...) 
add2)) ))) 2851 Node* addtrunc = cmp1; 2852 Node* add = CountedLoopNode::match_incr_with_optional_truncation(addtrunc, 2853 &trunc1, &trunc2, &ttype, T_INT); 2854 if (add && add->in(1)->is_Phi()) { 2855 Node* phi = add->in(1); 2856 for (uint i = 1; i < phi->req(); i++) { 2857 if (phi->in(i) == addtrunc) { 2858 add2 = add->in(2); 2859 break; 2860 } 2861 } 2862 } 2863 } 2864 if (add2 != nullptr) { 2865 const TypeInt* add2t = _igvn.type(add2)->is_int(); 2866 if (add2t->is_con()) { 2867 return add2t->get_con(); 2868 } 2869 } 2870 return 0; 2871 } 2872 2873 2874 //---------------------- stay_in_loop ------------------------------------- 2875 // Return the (unique) control output node that's in the loop (if it exists.) 2876 Node* PhaseIdealLoop::stay_in_loop( Node* n, IdealLoopTree *loop) { 2877 Node* unique = nullptr; 2878 if (!n) return nullptr; 2879 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2880 Node* use = n->fast_out(i); 2881 if (!has_ctrl(use) && loop->is_member(get_loop(use))) { 2882 if (unique != nullptr) { 2883 return nullptr; 2884 } 2885 unique = use; 2886 } 2887 } 2888 return unique; 2889 } 2890 2891 //------------------------------ register_node ------------------------------------- 2892 // Utility to register node "n" with PhaseIdealLoop 2893 void PhaseIdealLoop::register_node(Node* n, IdealLoopTree* loop, Node* pred, uint ddepth) { 2894 _igvn.register_new_node_with_optimizer(n); 2895 loop->_body.push(n); 2896 if (n->is_CFG()) { 2897 set_loop(n, loop); 2898 set_idom(n, pred, ddepth); 2899 } else { 2900 set_ctrl(n, pred); 2901 } 2902 } 2903 2904 //------------------------------ proj_clone ------------------------------------- 2905 // Utility to create an if-projection 2906 ProjNode* PhaseIdealLoop::proj_clone(ProjNode* p, IfNode* iff) { 2907 ProjNode* c = p->clone()->as_Proj(); 2908 c->set_req(0, iff); 2909 return c; 2910 } 2911 2912 //------------------------------ short_circuit_if ------------------------------------- 2913 // Force the iff control output to be the live_proj 2914 Node* PhaseIdealLoop::short_circuit_if(IfNode* iff, ProjNode* live_proj) { 2915 guarantee(live_proj != nullptr, "null projection"); 2916 int proj_con = live_proj->_con; 2917 assert(proj_con == 0 || proj_con == 1, "false or true projection"); 2918 Node* con = intcon(proj_con); 2919 if (iff) { 2920 iff->set_req(1, con); 2921 } 2922 return con; 2923 } 2924 2925 //------------------------------ insert_if_before_proj ------------------------------------- 2926 // Insert a new if before an if projection (* - new node) 2927 // 2928 // before 2929 // if(test) 2930 // / \ 2931 // v v 2932 // other-proj proj (arg) 2933 // 2934 // after 2935 // if(test) 2936 // / \ 2937 // / v 2938 // | * proj-clone 2939 // v | 2940 // other-proj v 2941 // * new_if(relop(cmp[IU](left,right))) 2942 // / \ 2943 // v v 2944 // * new-proj proj 2945 // (returned) 2946 // 2947 ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj) { 2948 IfNode* iff = proj->in(0)->as_If(); 2949 IdealLoopTree *loop = get_loop(proj); 2950 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj(); 2951 uint ddepth = dom_depth(proj); 2952 2953 _igvn.rehash_node_delayed(iff); 2954 _igvn.rehash_node_delayed(proj); 2955 2956 proj->set_req(0, nullptr); // temporary disconnect 2957 ProjNode* proj2 = proj_clone(proj, iff); 2958 register_node(proj2, loop, iff, ddepth); 2959 2960 Node* cmp = Signed ? 
(Node*) new CmpINode(left, right) : (Node*) new CmpUNode(left, right); 2961 register_node(cmp, loop, proj2, ddepth); 2962 2963 BoolNode* bol = new BoolNode(cmp, relop); 2964 register_node(bol, loop, proj2, ddepth); 2965 2966 int opcode = iff->Opcode(); 2967 assert(opcode == Op_If || opcode == Op_RangeCheck, "unexpected opcode"); 2968 IfNode* new_if = IfNode::make_with_same_profile(iff, proj2, bol); 2969 register_node(new_if, loop, proj2, ddepth); 2970 2971 proj->set_req(0, new_if); // reattach 2972 set_idom(proj, new_if, ddepth); 2973 2974 ProjNode* new_exit = proj_clone(other_proj, new_if)->as_Proj(); 2975 guarantee(new_exit != nullptr, "null exit node"); 2976 register_node(new_exit, get_loop(other_proj), new_if, ddepth); 2977 2978 return new_exit; 2979 } 2980 2981 //------------------------------ insert_region_before_proj ------------------------------------- 2982 // Insert a region before an if projection (* - new node) 2983 // 2984 // before 2985 // if(test) 2986 // / | 2987 // v | 2988 // proj v 2989 // other-proj 2990 // 2991 // after 2992 // if(test) 2993 // / | 2994 // v | 2995 // * proj-clone v 2996 // | other-proj 2997 // v 2998 // * new-region 2999 // | 3000 // v 3001 // * dum_if 3002 // / \ 3003 // v \ 3004 // * dum-proj v 3005 // proj 3006 // 3007 RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) { 3008 IfNode* iff = proj->in(0)->as_If(); 3009 IdealLoopTree *loop = get_loop(proj); 3010 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj(); 3011 uint ddepth = dom_depth(proj); 3012 3013 _igvn.rehash_node_delayed(iff); 3014 _igvn.rehash_node_delayed(proj); 3015 3016 proj->set_req(0, nullptr); // temporary disconnect 3017 ProjNode* proj2 = proj_clone(proj, iff); 3018 register_node(proj2, loop, iff, ddepth); 3019 3020 RegionNode* reg = new RegionNode(2); 3021 reg->set_req(1, proj2); 3022 register_node(reg, loop, iff, ddepth); 3023 3024 IfNode* dum_if = new IfNode(reg, short_circuit_if(nullptr, proj), iff->_prob, iff->_fcnt); 3025 register_node(dum_if, loop, reg, ddepth); 3026 3027 proj->set_req(0, dum_if); // reattach 3028 set_idom(proj, dum_if, ddepth); 3029 3030 ProjNode* dum_proj = proj_clone(other_proj, dum_if); 3031 register_node(dum_proj, loop, dum_if, ddepth); 3032 3033 return reg; 3034 } 3035 3036 // Idea 3037 // ---- 3038 // Partial Peeling tries to rotate the loop in such a way that it can later be turned into a counted loop. Counted loops 3039 // require a signed loop exit test. When calling this method, we've only found a suitable unsigned test to partial peel 3040 // with. 
Therefore, we try to split off a signed loop exit test from the unsigned test such that it can be used as new 3041 // loop exit while keeping the unsigned test unchanged and preserving the same behavior as if we've used the unsigned 3042 // test alone instead: 3043 // 3044 // Before Partial Peeling: 3045 // Loop: 3046 // <peeled section> 3047 // Split off signed loop exit test 3048 // <-- CUT HERE --> 3049 // Unchanged unsigned loop exit test 3050 // <rest of unpeeled section> 3051 // goto Loop 3052 // 3053 // After Partial Peeling: 3054 // <cloned peeled section> 3055 // Cloned split off signed loop exit test 3056 // Loop: 3057 // Unchanged unsigned loop exit test 3058 // <rest of unpeeled section> 3059 // <peeled section> 3060 // Split off signed loop exit test 3061 // goto Loop 3062 // 3063 // Details 3064 // ------- 3065 // Before: 3066 // if (i <u limit) Unsigned loop exit condition 3067 // / | 3068 // v v 3069 // exit-proj stay-in-loop-proj 3070 // 3071 // Split off a signed loop exit test (i.e. with CmpI) from an unsigned loop exit test (i.e. with CmpU) and insert it 3072 // before the CmpU on the stay-in-loop path and keep both tests: 3073 // 3074 // if (i <u limit) Signed loop exit test 3075 // / | 3076 // / if (i <u limit) Unsigned loop exit test 3077 // / / | 3078 // v v v 3079 // exit-region stay-in-loop-proj 3080 // 3081 // Implementation 3082 // -------------- 3083 // We need to make sure that the new signed loop exit test is properly inserted into the graph such that the unsigned 3084 // loop exit test still dominates the same set of control nodes, the ctrl() relation from data nodes to both loop 3085 // exit tests is preserved, and their loop nesting is correct. 3086 // 3087 // To achieve that, we clone the unsigned loop exit test completely (leave it unchanged), insert the signed loop exit 3088 // test above it and kill the original unsigned loop exit test by setting it's condition to a constant 3089 // (i.e. stay-in-loop-const in graph below) such that IGVN can fold it later: 3090 // 3091 // if (stay-in-loop-const) Killed original unsigned loop exit test 3092 // / | 3093 // / v 3094 // / if (i < limit) Split off signed loop exit test 3095 // / / | 3096 // / / v 3097 // / / if (i <u limit) Cloned unsigned loop exit test 3098 // / / / | 3099 // v v v | 3100 // exit-region | 3101 // | | 3102 // dummy-if | 3103 // / | | 3104 // dead | | 3105 // v v 3106 // exit-proj stay-in-loop-proj 3107 // 3108 // Note: The dummy-if is inserted to create a region to merge the loop exits between the original to be killed unsigned 3109 // loop exit test and its exit projection while keeping the exit projection (also see insert_region_before_proj()). 3110 // 3111 // Requirements 3112 // ------------ 3113 // Note that we can only split off a signed loop exit test from the unsigned loop exit test when the behavior is exactly 3114 // the same as before with only a single unsigned test. This is only possible if certain requirements are met. 3115 // Otherwise, we need to bail out (see comments in the code below). 
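//
// As an informal aid to the equivalence used below (only valid when limit >= 0, see (COND) in the
// code comments), a worked example with limit = 10 and 32-bit ints:
//   i = -1: (uint)i = 0xFFFFFFFF >=u 10 and i < 0     -> both forms of the exit test are true
//   i = 12: 12 >=u 10            and 12 >= 10         -> both true
//   i =  5: 5 <u 10              and 0 <= 5 < 10      -> both false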
3116 IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree* loop) { 3117 const bool Signed = true; 3118 const bool Unsigned = false; 3119 3120 BoolNode* bol = if_cmpu->in(1)->as_Bool(); 3121 if (bol->_test._test != BoolTest::lt) { 3122 return nullptr; 3123 } 3124 CmpNode* cmpu = bol->in(1)->as_Cmp(); 3125 assert(cmpu->Opcode() == Op_CmpU, "must be unsigned comparison"); 3126 3127 int stride = stride_of_possible_iv(if_cmpu); 3128 if (stride == 0) { 3129 return nullptr; 3130 } 3131 3132 Node* lp_proj = stay_in_loop(if_cmpu, loop); 3133 guarantee(lp_proj != nullptr, "null loop node"); 3134 3135 ProjNode* lp_continue = lp_proj->as_Proj(); 3136 ProjNode* lp_exit = if_cmpu->proj_out(!lp_continue->is_IfTrue())->as_Proj(); 3137 if (!lp_exit->is_IfFalse()) { 3138 // The loop exit condition is (i <u limit) ==> (i >= 0 && i < limit). 3139 // We therefore can't add a single exit condition. 3140 return nullptr; 3141 } 3142 // The unsigned loop exit condition is 3143 // !(i <u limit) 3144 // = i >=u limit 3145 // 3146 // First, we note that for any x for which 3147 // 0 <= x <= INT_MAX 3148 // we can convert x to an unsigned int and still get the same guarantee: 3149 // 0 <= (uint) x <= INT_MAX = (uint) INT_MAX 3150 // 0 <=u (uint) x <=u INT_MAX = (uint) INT_MAX (LEMMA) 3151 // 3152 // With that in mind, if 3153 // limit >= 0 (COND) 3154 // then the unsigned loop exit condition 3155 // i >=u limit (ULE) 3156 // is equivalent to 3157 // i < 0 || i >= limit (SLE-full) 3158 // because either i is negative and therefore always greater than MAX_INT when converting to unsigned 3159 // (uint) i >=u MAX_INT >= limit >= 0 3160 // or otherwise 3161 // i >= limit >= 0 3162 // holds due to (LEMMA). 3163 // 3164 // For completeness, a counterexample with limit < 0: 3165 // Assume i = -3 and limit = -2: 3166 // i < 0 3167 // -3 < 0 3168 // is true and thus also "i < 0 || i >= limit". But 3169 // i >=u limit 3170 // -3 >=u -2 3171 // is false. 3172 Node* limit = cmpu->in(2); 3173 const TypeInt* type_limit = _igvn.type(limit)->is_int(); 3174 if (type_limit->_lo < 0) { 3175 return nullptr; 3176 } 3177 3178 // We prove below that we can extract a single signed loop exit condition from (SLE-full), depending on the stride: 3179 // stride < 0: 3180 // i < 0 (SLE = SLE-negative) 3181 // stride > 0: 3182 // i >= limit (SLE = SLE-positive) 3183 // such that we have the following graph before Partial Peeling with stride > 0 (similar for stride < 0): 3184 // 3185 // Loop: 3186 // <peeled section> 3187 // i >= limit (SLE-positive) 3188 // <-- CUT HERE --> 3189 // i >=u limit (ULE) 3190 // <rest of unpeeled section> 3191 // goto Loop 3192 // 3193 // We exit the loop if: 3194 // (SLE) is true OR (ULE) is true 3195 // However, if (SLE) is true then (ULE) also needs to be true to ensure the exact same behavior. Otherwise, we wrongly 3196 // exit a loop that should not have been exited if we did not apply Partial Peeling. More formally, we need to ensure: 3197 // (SLE) IMPLIES (ULE) 3198 // This indeed holds when (COND) is given: 3199 // - stride > 0: 3200 // i >= limit // (SLE = SLE-positive) 3201 // i >= limit >= 0 // (COND) 3202 // i >=u limit >= 0 // (LEMMA) 3203 // which is the unsigned loop exit condition (ULE).
3204 // - stride < 0: 3205 // i < 0 // (SLE = SLE-negative) 3206 // (uint) i >u MAX_INT // (NEG) all negative values are greater than MAX_INT when converted to unsigned 3207 // MAX_INT >= limit >= 0 // (COND) 3208 // MAX_INT >=u limit >= 0 // (LEMMA) 3209 // and thus from (NEG) and (LEMMA): 3210 // i >=u limit 3211 // which is the unsigned loop exit condition (ULE). 3212 // 3213 // 3214 // After Partial Peeling, we have the following structure for stride > 0 (similar for stride < 0): 3215 // <cloned peeled section> 3216 // i >= limit (SLE-positive) 3217 // Loop: 3218 // i >=u limit (ULE) 3219 // <rest of unpeeled section> 3220 // <peeled section> 3221 // i >= limit (SLE-positive) 3222 // goto Loop 3223 Node* rhs_cmpi; 3224 if (stride > 0) { 3225 rhs_cmpi = limit; // For i >= limit 3226 } else { 3227 rhs_cmpi = makecon(TypeInt::ZERO); // For i < 0 3228 } 3229 // Create a new region on the exit path 3230 RegionNode* reg = insert_region_before_proj(lp_exit); 3231 guarantee(reg != nullptr, "null region node"); 3232 3233 // Clone the if-cmpu-true-false using a signed compare 3234 BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge; 3235 ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, rhs_cmpi, lp_continue); 3236 reg->add_req(cmpi_exit); 3237 3238 // Clone the if-cmpu-true-false 3239 BoolTest::mask rel_u = bol->_test._test; 3240 ProjNode* cmpu_exit = insert_if_before_proj(cmpu->in(1), Unsigned, rel_u, cmpu->in(2), lp_continue); 3241 reg->add_req(cmpu_exit); 3242 3243 // Force original if to stay in loop. 3244 short_circuit_if(if_cmpu, lp_continue); 3245 3246 return cmpi_exit->in(0)->as_If(); 3247 } 3248 3249 //------------------------------ remove_cmpi_loop_exit ------------------------------------- 3250 // Remove a previously inserted signed compare loop exit. 3251 void PhaseIdealLoop::remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop) { 3252 Node* lp_proj = stay_in_loop(if_cmp, loop); 3253 assert(if_cmp->in(1)->in(1)->Opcode() == Op_CmpI && 3254 stay_in_loop(lp_proj, loop)->is_If() && 3255 stay_in_loop(lp_proj, loop)->in(1)->in(1)->Opcode() == Op_CmpU, "inserted cmpi before cmpu"); 3256 Node* con = makecon(lp_proj->is_IfTrue() ? TypeInt::ONE : TypeInt::ZERO); 3257 if_cmp->set_req(1, con); 3258 } 3259 3260 //------------------------------ scheduled_nodelist ------------------------------------- 3261 // Create a post order schedule of nodes that are in the 3262 // "member" set. The list is returned in "sched". 3263 // The first node in "sched" is the loop head, followed by 3264 // nodes which have no inputs in the "member" set, and then 3265 // followed by the nodes that have an immediate input dependence 3266 // on a node in "sched". 
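// Implementation note: the walk below is an iterative DFS over def->use edges, driven by a Node_Stack rather than
// recursion. It is seeded with the loop head and with every member node that has no data input inside the member
// set, and it only descends into uses that are themselves in the member set.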
3267 void PhaseIdealLoop::scheduled_nodelist( IdealLoopTree *loop, VectorSet& member, Node_List &sched ) { 3268 3269 assert(member.test(loop->_head->_idx), "loop head must be in member set"); 3270 VectorSet visited; 3271 Node_Stack nstack(loop->_body.size()); 3272 3273 Node* n = loop->_head; // top of stack is cached in "n" 3274 uint idx = 0; 3275 visited.set(n->_idx); 3276 3277 // Initially push all with no inputs from within member set 3278 for(uint i = 0; i < loop->_body.size(); i++ ) { 3279 Node *elt = loop->_body.at(i); 3280 if (member.test(elt->_idx)) { 3281 bool found = false; 3282 for (uint j = 0; j < elt->req(); j++) { 3283 Node* def = elt->in(j); 3284 if (def && member.test(def->_idx) && def != elt) { 3285 found = true; 3286 break; 3287 } 3288 } 3289 if (!found && elt != loop->_head) { 3290 nstack.push(n, idx); 3291 n = elt; 3292 assert(!visited.test(n->_idx), "not seen yet"); 3293 visited.set(n->_idx); 3294 } 3295 } 3296 } 3297 3298 // traverse out's that are in the member set 3299 while (true) { 3300 if (idx < n->outcnt()) { 3301 Node* use = n->raw_out(idx); 3302 idx++; 3303 if (!visited.test_set(use->_idx)) { 3304 if (member.test(use->_idx)) { 3305 nstack.push(n, idx); 3306 n = use; 3307 idx = 0; 3308 } 3309 } 3310 } else { 3311 // All outputs processed 3312 sched.push(n); 3313 if (nstack.is_empty()) break; 3314 n = nstack.node(); 3315 idx = nstack.index(); 3316 nstack.pop(); 3317 } 3318 } 3319 } 3320 3321 3322 //------------------------------ has_use_in_set ------------------------------------- 3323 // Has a use in the vector set 3324 bool PhaseIdealLoop::has_use_in_set( Node* n, VectorSet& vset ) { 3325 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 3326 Node* use = n->fast_out(j); 3327 if (vset.test(use->_idx)) { 3328 return true; 3329 } 3330 } 3331 return false; 3332 } 3333 3334 3335 //------------------------------ has_use_internal_to_set ------------------------------------- 3336 // Has use internal to the vector set (ie. not in a phi at the loop head) 3337 bool PhaseIdealLoop::has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop ) { 3338 Node* head = loop->_head; 3339 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 3340 Node* use = n->fast_out(j); 3341 if (vset.test(use->_idx) && !(use->is_Phi() && use->in(0) == head)) { 3342 return true; 3343 } 3344 } 3345 return false; 3346 } 3347 3348 3349 //------------------------------ clone_for_use_outside_loop ------------------------------------- 3350 // clone "n" for uses that are outside of loop 3351 int PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) { 3352 int cloned = 0; 3353 assert(worklist.size() == 0, "should be empty"); 3354 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 3355 Node* use = n->fast_out(j); 3356 if( !loop->is_member(get_loop(has_ctrl(use) ? 
get_ctrl(use) : use)) ) { 3357 worklist.push(use); 3358 } 3359 } 3360 3361 if (C->check_node_count(worklist.size() + NodeLimitFudgeFactor, 3362 "Too many clones required in clone_for_use_outside_loop in partial peeling")) { 3363 return -1; 3364 } 3365 3366 while( worklist.size() ) { 3367 Node *use = worklist.pop(); 3368 if (!has_node(use) || use->in(0) == C->top()) continue; 3369 uint j; 3370 for (j = 0; j < use->req(); j++) { 3371 if (use->in(j) == n) break; 3372 } 3373 assert(j < use->req(), "must be there"); 3374 3375 // clone "n" and insert it between the inputs of "n" and the use outside the loop 3376 Node* n_clone = n->clone(); 3377 _igvn.replace_input_of(use, j, n_clone); 3378 cloned++; 3379 Node* use_c; 3380 if (!use->is_Phi()) { 3381 use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0); 3382 } else { 3383 // Use in a phi is considered a use in the associated predecessor block 3384 use_c = use->in(0)->in(j); 3385 } 3386 set_ctrl(n_clone, use_c); 3387 assert(!loop->is_member(get_loop(use_c)), "should be outside loop"); 3388 get_loop(use_c)->_body.push(n_clone); 3389 _igvn.register_new_node_with_optimizer(n_clone); 3390 #ifndef PRODUCT 3391 if (TracePartialPeeling) { 3392 tty->print_cr("loop exit cloning old: %d new: %d newbb: %d", n->_idx, n_clone->_idx, get_ctrl(n_clone)->_idx); 3393 } 3394 #endif 3395 } 3396 return cloned; 3397 } 3398 3399 3400 //------------------------------ clone_for_special_use_inside_loop ------------------------------------- 3401 // clone "n" for special uses that are in the not_peeled region. 3402 // If these def-uses occur in separate blocks, the code generator 3403 // marks the method as not compilable. For example, if a "BoolNode" 3404 // is in a different basic block than the "IfNode" that uses it, then 3405 // the compilation is aborted in the code generator. 
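// Sketch of the effect (hypothetical nodes b and cmp, shown in pseudo code): if a Bool b that sits in the peel
// region also feeds an If in the not_peeled region, a clone b' is created and the not_peeled user is rewired to it,
// so that the Bool and its If end up in the same region:
//
//   before:   peel:  b = Bool(cmp)        not_peel:  If(b)
//   after:    peel:  b = Bool(cmp)        not_peel:  b' = Bool(cmp); If(b')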
3406 void PhaseIdealLoop::clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n, 3407 VectorSet& not_peel, Node_List& sink_list, Node_List& worklist ) { 3408 if (n->is_Phi() || n->is_Load()) { 3409 return; 3410 } 3411 assert(worklist.size() == 0, "should be empty"); 3412 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 3413 Node* use = n->fast_out(j); 3414 if ( not_peel.test(use->_idx) && 3415 (use->is_If() || use->is_CMove() || use->is_Bool()) && 3416 use->in(1) == n) { 3417 worklist.push(use); 3418 } 3419 } 3420 if (worklist.size() > 0) { 3421 // clone "n" and insert it between inputs of "n" and the use 3422 Node* n_clone = n->clone(); 3423 loop->_body.push(n_clone); 3424 _igvn.register_new_node_with_optimizer(n_clone); 3425 set_ctrl(n_clone, get_ctrl(n)); 3426 sink_list.push(n_clone); 3427 not_peel.set(n_clone->_idx); 3428 #ifndef PRODUCT 3429 if (TracePartialPeeling) { 3430 tty->print_cr("special not_peeled cloning old: %d new: %d", n->_idx, n_clone->_idx); 3431 } 3432 #endif 3433 while( worklist.size() ) { 3434 Node *use = worklist.pop(); 3435 _igvn.rehash_node_delayed(use); 3436 for (uint j = 1; j < use->req(); j++) { 3437 if (use->in(j) == n) { 3438 use->set_req(j, n_clone); 3439 } 3440 } 3441 } 3442 } 3443 } 3444 3445 3446 //------------------------------ insert_phi_for_loop ------------------------------------- 3447 // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist 3448 void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp ) { 3449 Node *phi = PhiNode::make(lp, back_edge_val); 3450 phi->set_req(LoopNode::EntryControl, lp_entry_val); 3451 // Use existing phi if it already exists 3452 Node *hit = _igvn.hash_find_insert(phi); 3453 if( hit == nullptr ) { 3454 _igvn.register_new_node_with_optimizer(phi); 3455 set_ctrl(phi, lp); 3456 } else { 3457 // Remove the new phi from the graph and use the hit 3458 _igvn.remove_dead_node(phi); 3459 phi = hit; 3460 } 3461 _igvn.replace_input_of(use, idx, phi); 3462 } 3463 3464 #ifdef ASSERT 3465 //------------------------------ is_valid_loop_partition ------------------------------------- 3466 // Validate the loop partition sets: peel and not_peel 3467 bool PhaseIdealLoop::is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list, 3468 VectorSet& not_peel ) { 3469 uint i; 3470 // Check that peel_list entries are in the peel set 3471 for (i = 0; i < peel_list.size(); i++) { 3472 if (!peel.test(peel_list.at(i)->_idx)) { 3473 return false; 3474 } 3475 } 3476 // Check at loop members are in one of peel set or not_peel set 3477 for (i = 0; i < loop->_body.size(); i++ ) { 3478 Node *def = loop->_body.at(i); 3479 uint di = def->_idx; 3480 // Check that peel set elements are in peel_list 3481 if (peel.test(di)) { 3482 if (not_peel.test(di)) { 3483 return false; 3484 } 3485 // Must be in peel_list also 3486 bool found = false; 3487 for (uint j = 0; j < peel_list.size(); j++) { 3488 if (peel_list.at(j)->_idx == di) { 3489 found = true; 3490 break; 3491 } 3492 } 3493 if (!found) { 3494 return false; 3495 } 3496 } else if (not_peel.test(di)) { 3497 if (peel.test(di)) { 3498 return false; 3499 } 3500 } else { 3501 return false; 3502 } 3503 } 3504 return true; 3505 } 3506 3507 //------------------------------ is_valid_clone_loop_exit_use ------------------------------------- 3508 // Ensure a use outside of loop is of the right form 3509 bool PhaseIdealLoop::is_valid_clone_loop_exit_use( 
IdealLoopTree *loop, Node* use, uint exit_idx) { 3510 Node *use_c = has_ctrl(use) ? get_ctrl(use) : use; 3511 return (use->is_Phi() && 3512 use_c->is_Region() && use_c->req() == 3 && 3513 (use_c->in(exit_idx)->Opcode() == Op_IfTrue || 3514 use_c->in(exit_idx)->Opcode() == Op_IfFalse || 3515 use_c->in(exit_idx)->Opcode() == Op_JumpProj) && 3516 loop->is_member( get_loop( use_c->in(exit_idx)->in(0) ) ) ); 3517 } 3518 3519 //------------------------------ is_valid_clone_loop_form ------------------------------------- 3520 // Ensure that all uses outside of loop are of the right form 3521 bool PhaseIdealLoop::is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list, 3522 uint orig_exit_idx, uint clone_exit_idx) { 3523 uint len = peel_list.size(); 3524 for (uint i = 0; i < len; i++) { 3525 Node *def = peel_list.at(i); 3526 3527 for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) { 3528 Node *use = def->fast_out(j); 3529 Node *use_c = has_ctrl(use) ? get_ctrl(use) : use; 3530 if (!loop->is_member(get_loop(use_c))) { 3531 // use is not in the loop, check for correct structure 3532 if (use->in(0) == def) { 3533 // Okay 3534 } else if (!is_valid_clone_loop_exit_use(loop, use, orig_exit_idx)) { 3535 return false; 3536 } 3537 } 3538 } 3539 } 3540 return true; 3541 } 3542 #endif 3543 3544 //------------------------------ partial_peel ------------------------------------- 3545 // Partially peel (aka loop rotation) the top portion of a loop (called 3546 // the peel section below) by cloning it and placing one copy just before 3547 // the new loop head and the other copy at the bottom of the new loop. 3548 // 3549 // before after where it came from 3550 // 3551 // stmt1 stmt1 3552 // loop: stmt2 clone 3553 // stmt2 if condA goto exitA clone 3554 // if condA goto exitA new_loop: new 3555 // stmt3 stmt3 clone 3556 // if !condB goto loop if condB goto exitB clone 3557 // exitB: stmt2 orig 3558 // stmt4 if !condA goto new_loop orig 3559 // exitA: goto exitA 3560 // exitB: 3561 // stmt4 3562 // exitA: 3563 // 3564 // Step 1: find the cut point: an exit test on probable 3565 // induction variable. 3566 // Step 2: schedule (with cloning) operations in the peel 3567 // section that can be executed after the cut into 3568 // the section that is not peeled. This may need 3569 // to clone operations into exit blocks. For 3570 // instance, a reference to A[i] in the not-peel 3571 // section and a reference to B[i] in an exit block 3572 // may cause a left-shift of i by 2 to be placed 3573 // in the peel block. This step will clone the left 3574 // shift into the exit block and sink the left shift 3575 // from the peel to the not-peel section. 3576 // Step 3: clone the loop, retarget the control, and insert 3577 // phis for values that are live across the new loop 3578 // head. This is very dependent on the graph structure 3579 // from clone_loop. It creates region nodes for 3580 // exit control and associated phi nodes for values 3581 // that flow out of the loop through that exit. The region 3582 // node is dominated by the clone's control projection. 3583 // So the clone's peel section is placed before the 3584 // new loop head, and the clone's not-peel section 3585 // forms the top part of the new loop. The original 3586 // peel section forms the tail of the new loop. 3587 // Step 4: update the dominator tree and recompute the 3588 // dominator depth.
3589 // 3590 // orig 3591 // 3592 // stmt1 3593 // | 3594 // v 3595 // predicates 3596 // | 3597 // v 3598 // loop<----+ 3599 // | | 3600 // stmt2 | 3601 // | | 3602 // v | 3603 // ifA | 3604 // / | | 3605 // v v | 3606 // false true ^ <-- last_peel 3607 // / | | 3608 // / ===|==cut | 3609 // / stmt3 | <-- first_not_peel 3610 // / | | 3611 // | v | 3612 // v ifB | 3613 // exitA: / \ | 3614 // / \ | 3615 // v v | 3616 // false true | 3617 // / \ | 3618 // / ----+ 3619 // | 3620 // v 3621 // exitB: 3622 // stmt4 3623 // 3624 // 3625 // after clone loop 3626 // 3627 // stmt1 3628 // | 3629 // v 3630 // predicates 3631 // / \ 3632 // clone / \ orig 3633 // / \ 3634 // / \ 3635 // v v 3636 // +---->loop loop<----+ 3637 // | | | | 3638 // | stmt2 stmt2 | 3639 // | | | | 3640 // | v v | 3641 // | ifA ifA | 3642 // | | \ / | | 3643 // | v v v v | 3644 // ^ true false false true ^ <-- last_peel 3645 // | | ^ \ / | | 3646 // | cut==|== \ \ / ===|==cut | 3647 // | stmt3 \ \ / stmt3 | <-- first_not_peel 3648 // | | dom | | | | 3649 // | v \ 1v v2 v | 3650 // | ifB regionA ifB | 3651 // | / \ | / \ | 3652 // | / \ v / \ | 3653 // | v v exitA: v v | 3654 // | true false false true | 3655 // | / ^ \ / \ | 3656 // +---- \ \ / ----+ 3657 // dom \ / 3658 // \ 1v v2 3659 // regionB 3660 // | 3661 // v 3662 // exitB: 3663 // stmt4 3664 // 3665 // 3666 // after partial peel 3667 // 3668 // stmt1 3669 // | 3670 // v 3671 // predicates 3672 // / 3673 // clone / orig 3674 // / TOP 3675 // / \ 3676 // v v 3677 // TOP->loop loop----+ 3678 // | | | 3679 // stmt2 stmt2 | 3680 // | | | 3681 // v v | 3682 // ifA ifA | 3683 // | \ / | | 3684 // v v v v | 3685 // true false false true | <-- last_peel 3686 // | ^ \ / +------|---+ 3687 // +->newloop \ \ / === ==cut | | 3688 // | stmt3 \ \ / TOP | | 3689 // | | dom | | stmt3 | | <-- first_not_peel 3690 // | v \ 1v v2 v | | 3691 // | ifB regionA ifB ^ v 3692 // | / \ | / \ | | 3693 // | / \ v / \ | | 3694 // | v v exitA: v v | | 3695 // | true false false true | | 3696 // | / ^ \ / \ | | 3697 // | | \ \ / v | | 3698 // | | dom \ / TOP | | 3699 // | | \ 1v v2 | | 3700 // ^ v regionB | | 3701 // | | | | | 3702 // | | v ^ v 3703 // | | exitB: | | 3704 // | | stmt4 | | 3705 // | +------------>-----------------+ | 3706 // | | 3707 // +-----------------<---------------------+ 3708 // 3709 // 3710 // final graph 3711 // 3712 // stmt1 3713 // | 3714 // v 3715 // predicates 3716 // | 3717 // v 3718 // stmt2 clone 3719 // | 3720 // v 3721 // ........> ifA clone 3722 // : / | 3723 // dom / | 3724 // : v v 3725 // : false true 3726 // : | | 3727 // : | v 3728 // : | newloop<-----+ 3729 // : | | | 3730 // : | stmt3 clone | 3731 // : | | | 3732 // : | v | 3733 // : | ifB | 3734 // : | / \ | 3735 // : | v v | 3736 // : | false true | 3737 // : | | | | 3738 // : | v stmt2 | 3739 // : | exitB: | | 3740 // : | stmt4 v | 3741 // : | ifA orig | 3742 // : | / \ | 3743 // : | / \ | 3744 // : | v v | 3745 // : | false true | 3746 // : | / \ | 3747 // : v v -----+ 3748 // RegionA 3749 // | 3750 // v 3751 // exitA 3752 // 3753 bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) { 3754 3755 assert(!loop->_head->is_CountedLoop(), "Non-counted loop only"); 3756 if (!loop->_head->is_Loop()) { 3757 return false; 3758 } 3759 LoopNode *head = loop->_head->as_Loop(); 3760 3761 if (head->is_partial_peel_loop() || head->partial_peel_has_failed()) { 3762 return false; 3763 } 3764 3765 // Check for complex exit control 3766 for (uint ii = 0; ii < loop->_body.size(); ii++) { 3767 Node *n 
= loop->_body.at(ii); 3768 int opc = n->Opcode(); 3769 if (n->is_Call() || 3770 opc == Op_Catch || 3771 opc == Op_CatchProj || 3772 opc == Op_Jump || 3773 opc == Op_JumpProj) { 3774 #ifndef PRODUCT 3775 if (TracePartialPeeling) { 3776 tty->print_cr("\nExit control too complex: lp: %d", head->_idx); 3777 } 3778 #endif 3779 return false; 3780 } 3781 } 3782 3783 int dd = dom_depth(head); 3784 3785 // Step 1: find cut point 3786 3787 // Walk up dominators to loop head looking for first loop exit 3788 // which is executed on every path thru loop. 3789 IfNode *peel_if = nullptr; 3790 IfNode *peel_if_cmpu = nullptr; 3791 3792 Node *iff = loop->tail(); 3793 while (iff != head) { 3794 if (iff->is_If()) { 3795 Node *ctrl = get_ctrl(iff->in(1)); 3796 if (ctrl->is_top()) return false; // Dead test on live IF. 3797 // If loop-varying exit-test, check for induction variable 3798 if (loop->is_member(get_loop(ctrl)) && 3799 loop->is_loop_exit(iff) && 3800 is_possible_iv_test(iff)) { 3801 Node* cmp = iff->in(1)->in(1); 3802 if (cmp->Opcode() == Op_CmpI) { 3803 peel_if = iff->as_If(); 3804 } else { 3805 assert(cmp->Opcode() == Op_CmpU, "must be CmpI or CmpU"); 3806 peel_if_cmpu = iff->as_If(); 3807 } 3808 } 3809 } 3810 iff = idom(iff); 3811 } 3812 3813 // Prefer signed compare over unsigned compare. 3814 IfNode* new_peel_if = nullptr; 3815 if (peel_if == nullptr) { 3816 if (!PartialPeelAtUnsignedTests || peel_if_cmpu == nullptr) { 3817 return false; // No peel point found 3818 } 3819 new_peel_if = insert_cmpi_loop_exit(peel_if_cmpu, loop); 3820 if (new_peel_if == nullptr) { 3821 return false; // No peel point found 3822 } 3823 peel_if = new_peel_if; 3824 } 3825 Node* last_peel = stay_in_loop(peel_if, loop); 3826 Node* first_not_peeled = stay_in_loop(last_peel, loop); 3827 if (first_not_peeled == nullptr || first_not_peeled == head) { 3828 return false; 3829 } 3830 3831 #ifndef PRODUCT 3832 if (TraceLoopOpts) { 3833 tty->print("PartialPeel "); 3834 loop->dump_head(); 3835 } 3836 3837 if (TracePartialPeeling) { 3838 tty->print_cr("before partial peel one iteration"); 3839 Node_List wl; 3840 Node* t = head->in(2); 3841 while (true) { 3842 wl.push(t); 3843 if (t == head) break; 3844 t = idom(t); 3845 } 3846 while (wl.size() > 0) { 3847 Node* tt = wl.pop(); 3848 tt->dump(); 3849 if (tt == last_peel) tty->print_cr("-- cut --"); 3850 } 3851 } 3852 #endif 3853 3854 C->print_method(PHASE_BEFORE_PARTIAL_PEELING, 4, head); 3855 3856 VectorSet peel; 3857 VectorSet not_peel; 3858 Node_List peel_list; 3859 Node_List worklist; 3860 Node_List sink_list; 3861 3862 uint estimate = loop->est_loop_clone_sz(1); 3863 if (exceeding_node_budget(estimate)) { 3864 return false; 3865 } 3866 3867 // Set of cfg nodes to peel are those that are executable from 3868 // the head through last_peel. 3869 assert(worklist.size() == 0, "should be empty"); 3870 worklist.push(head); 3871 peel.set(head->_idx); 3872 while (worklist.size() > 0) { 3873 Node *n = worklist.pop(); 3874 if (n != last_peel) { 3875 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 3876 Node* use = n->fast_out(j); 3877 if (use->is_CFG() && 3878 loop->is_member(get_loop(use)) && 3879 !peel.test_set(use->_idx)) { 3880 worklist.push(use); 3881 } 3882 } 3883 } 3884 } 3885 3886 // Set of non-cfg nodes to peel are those that are control 3887 // dependent on the cfg nodes. 3888 for (uint i = 0; i < loop->_body.size(); i++) { 3889 Node *n = loop->_body.at(i); 3890 Node *n_c = has_ctrl(n) ? 
get_ctrl(n) : n; 3891 if (peel.test(n_c->_idx)) { 3892 peel.set(n->_idx); 3893 } else { 3894 not_peel.set(n->_idx); 3895 } 3896 } 3897 3898 // Step 2: move operations from the peeled section down into the 3899 // not-peeled section 3900 3901 // Get a post order schedule of nodes in the peel region 3902 // Result in right-most operand. 3903 scheduled_nodelist(loop, peel, peel_list); 3904 3905 assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition"); 3906 3907 // For future check for too many new phis 3908 uint old_phi_cnt = 0; 3909 for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) { 3910 Node* use = head->fast_out(j); 3911 if (use->is_Phi()) old_phi_cnt++; 3912 } 3913 3914 #ifndef PRODUCT 3915 if (TracePartialPeeling) { 3916 tty->print_cr("\npeeled list"); 3917 } 3918 #endif 3919 3920 // Evacuate nodes in peel region into the not_peeled region if possible 3921 bool too_many_clones = false; 3922 uint new_phi_cnt = 0; 3923 uint cloned_for_outside_use = 0; 3924 for (uint i = 0; i < peel_list.size();) { 3925 Node* n = peel_list.at(i); 3926 #ifndef PRODUCT 3927 if (TracePartialPeeling) n->dump(); 3928 #endif 3929 bool incr = true; 3930 if (!n->is_CFG()) { 3931 if (has_use_in_set(n, not_peel)) { 3932 // If not used internal to the peeled region, 3933 // move "n" from peeled to not_peeled region. 3934 if (!has_use_internal_to_set(n, peel, loop)) { 3935 // if not pinned and not a load (which maybe anti-dependent on a store) 3936 // and not a CMove (Matcher expects only bool->cmove). 3937 if (n->in(0) == nullptr && !n->is_Load() && !n->is_CMove()) { 3938 int new_clones = clone_for_use_outside_loop(loop, n, worklist); 3939 if (C->failing()) return false; 3940 if (new_clones == -1) { 3941 too_many_clones = true; 3942 break; 3943 } 3944 cloned_for_outside_use += new_clones; 3945 sink_list.push(n); 3946 peel.remove(n->_idx); 3947 not_peel.set(n->_idx); 3948 peel_list.remove(i); 3949 incr = false; 3950 #ifndef PRODUCT 3951 if (TracePartialPeeling) { 3952 tty->print_cr("sink to not_peeled region: %d newbb: %d", 3953 n->_idx, get_ctrl(n)->_idx); 3954 } 3955 #endif 3956 } 3957 } else { 3958 // Otherwise check for special def-use cases that span 3959 // the peel/not_peel boundary such as bool->if 3960 clone_for_special_use_inside_loop(loop, n, not_peel, sink_list, worklist); 3961 new_phi_cnt++; 3962 } 3963 } 3964 } 3965 if (incr) i++; 3966 } 3967 3968 estimate += cloned_for_outside_use + new_phi_cnt; 3969 bool exceed_node_budget = !may_require_nodes(estimate); 3970 bool exceed_phi_limit = new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta; 3971 3972 if (too_many_clones || exceed_node_budget || exceed_phi_limit) { 3973 #ifndef PRODUCT 3974 if (TracePartialPeeling && exceed_phi_limit) { 3975 tty->print_cr("\nToo many new phis: %d old %d new cmpi: %c", 3976 new_phi_cnt, old_phi_cnt, new_peel_if != nullptr?'T':'F'); 3977 } 3978 #endif 3979 if (new_peel_if != nullptr) { 3980 remove_cmpi_loop_exit(new_peel_if, loop); 3981 } 3982 // Inhibit more partial peeling on this loop 3983 assert(!head->is_partial_peel_loop(), "not partial peeled"); 3984 head->mark_partial_peel_failed(); 3985 if (cloned_for_outside_use > 0) { 3986 // Terminate this round of loop opts because 3987 // the graph outside this loop was changed. 
3988 C->set_major_progress(); 3989 return true; 3990 } 3991 return false; 3992 } 3993 3994 // Step 3: clone loop, retarget control, and insert new phis 3995 3996 // Create new loop head for new phis and to hang 3997 // the nodes being moved (sinked) from the peel region. 3998 LoopNode* new_head = new LoopNode(last_peel, last_peel); 3999 new_head->set_unswitch_count(head->unswitch_count()); // Preserve 4000 _igvn.register_new_node_with_optimizer(new_head); 4001 assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled"); 4002 _igvn.replace_input_of(first_not_peeled, 0, new_head); 4003 set_loop(new_head, loop); 4004 loop->_body.push(new_head); 4005 not_peel.set(new_head->_idx); 4006 set_idom(new_head, last_peel, dom_depth(first_not_peeled)); 4007 set_idom(first_not_peeled, new_head, dom_depth(first_not_peeled)); 4008 4009 while (sink_list.size() > 0) { 4010 Node* n = sink_list.pop(); 4011 set_ctrl(n, new_head); 4012 } 4013 4014 assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition"); 4015 4016 clone_loop(loop, old_new, dd, IgnoreStripMined); 4017 4018 const uint clone_exit_idx = 1; 4019 const uint orig_exit_idx = 2; 4020 assert(is_valid_clone_loop_form(loop, peel_list, orig_exit_idx, clone_exit_idx), "bad clone loop"); 4021 4022 Node* head_clone = old_new[head->_idx]; 4023 LoopNode* new_head_clone = old_new[new_head->_idx]->as_Loop(); 4024 Node* orig_tail_clone = head_clone->in(2); 4025 4026 // Add phi if "def" node is in peel set and "use" is not 4027 4028 for (uint i = 0; i < peel_list.size(); i++) { 4029 Node *def = peel_list.at(i); 4030 if (!def->is_CFG()) { 4031 for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) { 4032 Node *use = def->fast_out(j); 4033 if (has_node(use) && use->in(0) != C->top() && 4034 (!peel.test(use->_idx) || 4035 (use->is_Phi() && use->in(0) == head)) ) { 4036 worklist.push(use); 4037 } 4038 } 4039 while( worklist.size() ) { 4040 Node *use = worklist.pop(); 4041 for (uint j = 1; j < use->req(); j++) { 4042 Node* n = use->in(j); 4043 if (n == def) { 4044 4045 // "def" is in peel set, "use" is not in peel set 4046 // or "use" is in the entry boundary (a phi) of the peel set 4047 4048 Node* use_c = has_ctrl(use) ? get_ctrl(use) : use; 4049 4050 if ( loop->is_member(get_loop( use_c )) ) { 4051 // use is in loop 4052 if (old_new[use->_idx] != nullptr) { // null for dead code 4053 Node* use_clone = old_new[use->_idx]; 4054 _igvn.replace_input_of(use, j, C->top()); 4055 insert_phi_for_loop( use_clone, j, old_new[def->_idx], def, new_head_clone ); 4056 } 4057 } else { 4058 assert(is_valid_clone_loop_exit_use(loop, use, orig_exit_idx), "clone loop format"); 4059 // use is not in the loop, check if the live range includes the cut 4060 Node* lp_if = use_c->in(orig_exit_idx)->in(0); 4061 if (not_peel.test(lp_if->_idx)) { 4062 assert(j == orig_exit_idx, "use from original loop"); 4063 insert_phi_for_loop( use, clone_exit_idx, old_new[def->_idx], def, new_head_clone ); 4064 } 4065 } 4066 } 4067 } 4068 } 4069 } 4070 } 4071 4072 // Step 3b: retarget control 4073 4074 // Redirect control to the new loop head if a cloned node in 4075 // the not_peeled region has control that points into the peeled region. 4076 // This necessary because the cloned peeled region will be outside 4077 // the loop. 
4078 // from to 4079 // cloned-peeled <---+ 4080 // new_head_clone: | <--+ 4081 // cloned-not_peeled in(0) in(0) 4082 // orig-peeled 4083 4084 for (uint i = 0; i < loop->_body.size(); i++) { 4085 Node *n = loop->_body.at(i); 4086 if (!n->is_CFG() && n->in(0) != nullptr && 4087 not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) { 4088 Node* n_clone = old_new[n->_idx]; 4089 if (n_clone->depends_only_on_test()) { 4090 // Pin array access nodes: control is updated here to the loop head. If, after some transformations, the 4091 // backedge is removed, an array load could become dependent on a condition that's not a range check for that 4092 // access. If that condition is replaced by an identical dominating one, then an unpinned load would risk 4093 // floating above its range check. 4094 Node* pinned_clone = n_clone->pin_array_access_node(); 4095 if (pinned_clone != nullptr) { 4096 register_new_node_with_ctrl_of(pinned_clone, n_clone); 4097 old_new.map(n->_idx, pinned_clone); 4098 _igvn.replace_node(n_clone, pinned_clone); 4099 n_clone = pinned_clone; 4100 } 4101 } 4102 _igvn.replace_input_of(n_clone, 0, new_head_clone); 4103 } 4104 } 4105 4106 // Backedge of the surviving new_head (the clone) is original last_peel 4107 _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel); 4108 4109 // Cut first node in original not_peel set 4110 _igvn.rehash_node_delayed(new_head); // Multiple edge updates: 4111 new_head->set_req(LoopNode::EntryControl, C->top()); // use rehash_node_delayed / set_req instead of 4112 new_head->set_req(LoopNode::LoopBackControl, C->top()); // multiple replace_input_of calls 4113 4114 // Copy head_clone back-branch info to original head 4115 // and remove original head's loop entry and 4116 // clone head's back-branch 4117 _igvn.rehash_node_delayed(head); // Multiple edge updates 4118 head->set_req(LoopNode::EntryControl, head_clone->in(LoopNode::LoopBackControl)); 4119 head->set_req(LoopNode::LoopBackControl, C->top()); 4120 _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top()); 4121 4122 // Similarly modify the phis 4123 for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) { 4124 Node* use = head->fast_out(k); 4125 if (use->is_Phi() && use->outcnt() > 0) { 4126 Node* use_clone = old_new[use->_idx]; 4127 _igvn.rehash_node_delayed(use); // Multiple edge updates 4128 use->set_req(LoopNode::EntryControl, use_clone->in(LoopNode::LoopBackControl)); 4129 use->set_req(LoopNode::LoopBackControl, C->top()); 4130 _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top()); 4131 } 4132 } 4133 4134 // Step 4: update dominator tree and dominator depth 4135 4136 set_idom(head, orig_tail_clone, dd); 4137 recompute_dom_depth(); 4138 4139 // Inhibit more partial peeling on this loop 4140 new_head_clone->set_partial_peel_loop(); 4141 C->set_major_progress(); 4142 loop->record_for_igvn(); 4143 4144 #ifndef PRODUCT 4145 if (TracePartialPeeling) { 4146 tty->print_cr("\nafter partial peel one iteration"); 4147 Node_List wl; 4148 Node* t = last_peel; 4149 while (true) { 4150 wl.push(t); 4151 if (t == head_clone) break; 4152 t = idom(t); 4153 } 4154 while (wl.size() > 0) { 4155 Node* tt = wl.pop(); 4156 if (tt == head) tty->print_cr("orig head"); 4157 else if (tt == new_head_clone) tty->print_cr("new head"); 4158 else if (tt == head_clone) tty->print_cr("clone head"); 4159 tt->dump(); 4160 } 4161 } 4162 #endif 4163 4164 C->print_method(PHASE_AFTER_PARTIAL_PEELING, 4, new_head_clone); 4165 4166 return true; 4167 } 4168 4169 // 
Transform: 4170 // 4171 // loop<-----------------+ 4172 // | | 4173 // stmt1 stmt2 .. stmtn | 4174 // | | | | 4175 // \ | / | 4176 // v v v | 4177 // region | 4178 // | | 4179 // shared_stmt | 4180 // | | 4181 // v | 4182 // if | 4183 // / \ | 4184 // | -----------+ 4185 // v 4186 // 4187 // into: 4188 // 4189 // loop<-------------------+ 4190 // | | 4191 // v | 4192 // +->loop | 4193 // | | | 4194 // | stmt1 stmt2 .. stmtn | 4195 // | | | | | 4196 // | | \ / | 4197 // | | v v | 4198 // | | region1 | 4199 // | | | | 4200 // | shared_stmt shared_stmt | 4201 // | | | | 4202 // | v v | 4203 // | if if | 4204 // | /\ / \ | 4205 // +-- | | -------+ 4206 // \ / 4207 // v v 4208 // region2 4209 // 4210 // (region2 is shown to merge mirrored projections of the loop exit 4211 // ifs to make the diagram clearer but they really merge the same 4212 // projection) 4213 // 4214 // Conditions for this transformation to trigger: 4215 // - the path through stmt1 is frequent enough 4216 // - the inner loop will be turned into a counted loop after transformation 4217 bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old_new) { 4218 if (!DuplicateBackedge) { 4219 return false; 4220 } 4221 assert(!loop->_head->is_CountedLoop() || StressDuplicateBackedge, "Non-counted loop only"); 4222 if (!loop->_head->is_Loop()) { 4223 return false; 4224 } 4225 4226 uint estimate = loop->est_loop_clone_sz(1); 4227 if (exceeding_node_budget(estimate)) { 4228 return false; 4229 } 4230 4231 LoopNode *head = loop->_head->as_Loop(); 4232 4233 Node* region = nullptr; 4234 IfNode* exit_test = nullptr; 4235 uint inner; 4236 float f; 4237 if (StressDuplicateBackedge) { 4238 if (head->is_strip_mined()) { 4239 return false; 4240 } 4241 Node* c = head->in(LoopNode::LoopBackControl); 4242 4243 while (c != head) { 4244 if (c->is_Region()) { 4245 region = c; 4246 } 4247 c = idom(c); 4248 } 4249 4250 if (region == nullptr) { 4251 return false; 4252 } 4253 4254 inner = 1; 4255 } else { 4256 // Is the shape of the loop that of a counted loop... 4257 Node* back_control = loop_exit_control(head, loop); 4258 if (back_control == nullptr) { 4259 return false; 4260 } 4261 4262 BoolTest::mask bt = BoolTest::illegal; 4263 float cl_prob = 0; 4264 Node* incr = nullptr; 4265 Node* limit = nullptr; 4266 Node* cmp = loop_exit_test(back_control, loop, incr, limit, bt, cl_prob); 4267 if (cmp == nullptr || cmp->Opcode() != Op_CmpI) { 4268 return false; 4269 } 4270 4271 // With an extra phi for the candidate iv? 
4272 // Or the region node is the loop head 4273 if (!incr->is_Phi() || incr->in(0) == head) { 4274 return false; 4275 } 4276 4277 PathFrequency pf(head, this); 4278 region = incr->in(0); 4279 4280 // Go over all paths for the extra phi's region and see if that 4281 // path is frequent enough and would match the expected iv shape 4282 // if the extra phi is removed 4283 inner = 0; 4284 for (uint i = 1; i < incr->req(); ++i) { 4285 Node* in = incr->in(i); 4286 Node* trunc1 = nullptr; 4287 Node* trunc2 = nullptr; 4288 const TypeInteger* iv_trunc_t = nullptr; 4289 Node* orig_in = in; 4290 if (!(in = CountedLoopNode::match_incr_with_optional_truncation(in, &trunc1, &trunc2, &iv_trunc_t, T_INT))) { 4291 continue; 4292 } 4293 assert(in->Opcode() == Op_AddI, "wrong increment code"); 4294 Node* xphi = nullptr; 4295 Node* stride = loop_iv_stride(in, loop, xphi); 4296 4297 if (stride == nullptr) { 4298 continue; 4299 } 4300 4301 PhiNode* phi = loop_iv_phi(xphi, nullptr, head, loop); 4302 if (phi == nullptr || 4303 (trunc1 == nullptr && phi->in(LoopNode::LoopBackControl) != incr) || 4304 (trunc1 != nullptr && phi->in(LoopNode::LoopBackControl) != trunc1)) { 4305 return false; 4306 } 4307 4308 f = pf.to(region->in(i)); 4309 if (f > 0.5) { 4310 inner = i; 4311 break; 4312 } 4313 } 4314 4315 if (inner == 0) { 4316 return false; 4317 } 4318 4319 exit_test = back_control->in(0)->as_If(); 4320 } 4321 4322 if (idom(region)->is_Catch()) { 4323 return false; 4324 } 4325 4326 // Collect all control nodes that need to be cloned (shared_stmt in the diagram) 4327 Unique_Node_List wq; 4328 wq.push(head->in(LoopNode::LoopBackControl)); 4329 for (uint i = 0; i < wq.size(); i++) { 4330 Node* c = wq.at(i); 4331 assert(get_loop(c) == loop, "not in the right loop?"); 4332 if (c->is_Region()) { 4333 if (c != region) { 4334 for (uint j = 1; j < c->req(); ++j) { 4335 wq.push(c->in(j)); 4336 } 4337 } 4338 } else { 4339 wq.push(c->in(0)); 4340 } 4341 assert(!is_strict_dominator(c, region), "shouldn't go above region"); 4342 } 4343 4344 Node* region_dom = idom(region); 4345 4346 // Can't do the transformation if this would cause a membar pair to 4347 // be split 4348 for (uint i = 0; i < wq.size(); i++) { 4349 Node* c = wq.at(i); 4350 if (c->is_MemBar() && (c->as_MemBar()->trailing_store() || c->as_MemBar()->trailing_load_store())) { 4351 assert(c->as_MemBar()->leading_membar()->trailing_membar() == c, "bad membar pair"); 4352 if (!wq.member(c->as_MemBar()->leading_membar())) { 4353 return false; 4354 } 4355 } 4356 } 4357 4358 // Collect data nodes that need to be clones as well 4359 int dd = dom_depth(head); 4360 4361 for (uint i = 0; i < loop->_body.size(); ++i) { 4362 Node* n = loop->_body.at(i); 4363 if (has_ctrl(n)) { 4364 Node* c = get_ctrl(n); 4365 if (wq.member(c)) { 4366 wq.push(n); 4367 } 4368 } else { 4369 set_idom(n, idom(n), dd); 4370 } 4371 } 4372 4373 // clone shared_stmt 4374 clone_loop_body(wq, old_new, nullptr); 4375 4376 Node* region_clone = old_new[region->_idx]; 4377 region_clone->set_req(inner, C->top()); 4378 set_idom(region, region->in(inner), dd); 4379 4380 // Prepare the outer loop 4381 Node* outer_head = new LoopNode(head->in(LoopNode::EntryControl), old_new[head->in(LoopNode::LoopBackControl)->_idx]); 4382 register_control(outer_head, loop->_parent, outer_head->in(LoopNode::EntryControl)); 4383 _igvn.replace_input_of(head, LoopNode::EntryControl, outer_head); 4384 set_idom(head, outer_head, dd); 4385 4386 fix_body_edges(wq, loop, old_new, dd, loop->_parent, true); 4387 4388 // Make one of the 
shared_stmt copies only reachable from stmt1, the 4389 // other only from stmt2..stmtn. 4390 Node* dom = nullptr; 4391 for (uint i = 1; i < region->req(); ++i) { 4392 if (i != inner) { 4393 _igvn.replace_input_of(region, i, C->top()); 4394 } 4395 Node* in = region_clone->in(i); 4396 if (in->is_top()) { 4397 continue; 4398 } 4399 if (dom == nullptr) { 4400 dom = in; 4401 } else { 4402 dom = dom_lca(dom, in); 4403 } 4404 } 4405 4406 set_idom(region_clone, dom, dd); 4407 4408 // Set up the outer loop 4409 for (uint i = 0; i < head->outcnt(); i++) { 4410 Node* u = head->raw_out(i); 4411 if (u->is_Phi()) { 4412 Node* outer_phi = u->clone(); 4413 outer_phi->set_req(0, outer_head); 4414 Node* backedge = old_new[u->in(LoopNode::LoopBackControl)->_idx]; 4415 if (backedge == nullptr) { 4416 backedge = u->in(LoopNode::LoopBackControl); 4417 } 4418 outer_phi->set_req(LoopNode::LoopBackControl, backedge); 4419 register_new_node(outer_phi, outer_head); 4420 _igvn.replace_input_of(u, LoopNode::EntryControl, outer_phi); 4421 } 4422 } 4423 4424 // create control and data nodes for out of loop uses (including region2) 4425 Node_List worklist; 4426 uint new_counter = C->unique(); 4427 fix_ctrl_uses(wq, loop, old_new, ControlAroundStripMined, outer_head, nullptr, worklist); 4428 4429 Node_List *split_if_set = nullptr; 4430 Node_List *split_bool_set = nullptr; 4431 Node_List *split_cex_set = nullptr; 4432 fix_data_uses(wq, loop, ControlAroundStripMined, loop->skip_strip_mined(), new_counter, old_new, worklist, 4433 split_if_set, split_bool_set, split_cex_set); 4434 4435 finish_clone_loop(split_if_set, split_bool_set, split_cex_set); 4436 4437 if (exit_test != nullptr) { 4438 float cnt = exit_test->_fcnt; 4439 if (cnt != COUNT_UNKNOWN) { 4440 exit_test->_fcnt = cnt * f; 4441 old_new[exit_test->_idx]->as_If()->_fcnt = cnt * (1 - f); 4442 } 4443 } 4444 4445 C->set_major_progress(); 4446 4447 return true; 4448 } 4449 4450 // AutoVectorize the loop: replace scalar ops with vector ops. 4451 PhaseIdealLoop::AutoVectorizeStatus 4452 PhaseIdealLoop::auto_vectorize(IdealLoopTree* lpt, VSharedData &vshared) { 4453 // Counted loop only 4454 if (!lpt->is_counted()) { 4455 return AutoVectorizeStatus::Impossible; 4456 } 4457 4458 // Main-loop only 4459 CountedLoopNode* cl = lpt->_head->as_CountedLoop(); 4460 if (!cl->is_main_loop()) { 4461 return AutoVectorizeStatus::Impossible; 4462 } 4463 4464 VLoop vloop(lpt, false); 4465 if (!vloop.check_preconditions()) { 4466 return AutoVectorizeStatus::TriedAndFailed; 4467 } 4468 4469 // Ensure the shared data is cleared before each use 4470 vshared.clear(); 4471 4472 const VLoopAnalyzer vloop_analyzer(vloop, vshared); 4473 if (!vloop_analyzer.success()) { 4474 return AutoVectorizeStatus::TriedAndFailed; 4475 } 4476 4477 SuperWord sw(vloop_analyzer); 4478 if (!sw.transform_loop()) { 4479 return AutoVectorizeStatus::TriedAndFailed; 4480 } 4481 4482 return AutoVectorizeStatus::Success; 4483 } 4484 4485 // Just before insert_pre_post_loops, we can multi-version the loop: 4486 // 4487 // multiversion_if 4488 // | | 4489 // fast_loop slow_loop 4490 // 4491 // In the fast_loop we can make speculative assumptions, and put the 4492 // conditions into the multiversion_if. If the conditions hold at runtime, 4493 // we enter the fast_loop, if the conditions fail, we take the slow_loop 4494 // instead which does not make any of the speculative assumptions. 4495 // 4496 // Note: we only multiversion the loop if the loop does not have any 4497 // auto vectorization check Predicate. 
If we have that predicate, 4498 // then we can simply add the speculative assumption checks to 4499 // that Predicate. This means we do not need to duplicate the 4500 // loop - we have a smaller graph and save compile time. Should 4501 // the conditions ever fail, then we deopt / trap at the Predicate 4502 // and recompile without that Predicate. At that point we will 4503 // multiversion the loop, so that we can still have speculative 4504 // runtime checks. 4505 // 4506 // We perform the multiversioning when the loop is still in its single 4507 // iteration form, even before we insert pre and post loops. This makes 4508 // the cloning much simpler. However, this means that both the fast 4509 // and the slow loop have to be optimized independently (adding pre 4510 // and post loops, unrolling the main loop, auto-vectorize etc.). And 4511 // we may end up not needing any speculative assumptions in the fast_loop 4512 // and then rejecting the slow_loop by constant folding the multiversion_if. 4513 // 4514 // Therefore, we "delay" the optimization of the slow_loop until we add 4515 // at least one speculative assumption for the fast_loop. If we never 4516 // add such a speculative runtime check, the OpaqueMultiversioningNode 4517 // of the multiversion_if constant folds to true after loop opts, and the 4518 // multiversion_if folds away the "delayed" slow_loop. If we add any 4519 // speculative assumption, then we notify the OpaqueMultiversioningNode 4520 // with "notify_slow_loop_that_it_can_resume_optimizations". 4521 // 4522 // Note: new runtime checks can be added to the multiversion_if with 4523 // PhaseIdealLoop::create_new_if_for_multiversion 4524 void PhaseIdealLoop::maybe_multiversion_for_auto_vectorization_runtime_checks(IdealLoopTree* lpt, Node_List& old_new) { 4525 CountedLoopNode* cl = lpt->_head->as_CountedLoop(); 4526 LoopNode* outer_loop = cl->skip_strip_mined(); 4527 Node* entry = outer_loop->in(LoopNode::EntryControl); 4528 4529 // Check we have multiversioning enabled, and are not already multiversioned. 4530 if (!LoopMultiversioning || cl->is_multiversion()) { return; } 4531 4532 // Check that we do not have a parse-predicate where we can add the runtime checks 4533 // during auto-vectorization. 4534 const Predicates predicates(entry); 4535 const PredicateBlock* predicate_block = predicates.auto_vectorization_check_block(); 4536 if (predicate_block->has_parse_predicate()) { return; } 4537 4538 // Check node budget. 4539 uint estimate = lpt->est_loop_clone_sz(2); 4540 if (!may_require_nodes(estimate)) { return; } 4541 4542 do_multiversioning(lpt, old_new); 4543 } 4544 4545 // Returns true if the Reduction node is unordered. 4546 static bool is_unordered_reduction(Node* n) { 4547 return n->is_Reduction() && !n->as_Reduction()->requires_strict_order(); 4548 } 4549 4550 // Having ReductionNodes in the loop is expensive. They need to recursively 4551 // fold together the vector values, for every vectorized loop iteration. If 4552 // we encounter the following pattern, we can vector accumulate the values 4553 // inside the loop, and only have a single UnorderedReduction after the loop. 4554 // 4555 // Note: UnorderedReduction represents a ReductionNode which does not require 4556 // calculating in strict order. 4557 // 4558 // CountedLoop init 4559 // | | 4560 // +------+ | +-----------------------+ 4561 // | | | | 4562 // PhiNode (s) | 4563 // | | 4564 // | Vector | 4565 // | | | 4566 // UnorderedReduction (first_ur) | 4567 // | | 4568 // ... 
Vector | 4569 // | | | 4570 // UnorderedReduction (last_ur) | 4571 // | | 4572 // +---------------------+ 4573 // 4574 // We patch the graph to look like this: 4575 // 4576 // CountedLoop identity_vector 4577 // | | 4578 // +-------+ | +---------------+ 4579 // | | | | 4580 // PhiNode (v) | 4581 // | | 4582 // | Vector | 4583 // | | | 4584 // VectorAccumulator | 4585 // | | 4586 // ... Vector | 4587 // | | | 4588 // init VectorAccumulator | 4589 // | | | | 4590 // UnorderedReduction +-----------+ 4591 // 4592 // We turned the scalar (s) Phi into a vectorized one (v). In the loop, we 4593 // use vector_accumulators, which do the same reductions, but only element 4594 // wise. This is a single operation per vector_accumulator, rather than many 4595 // for a UnorderedReduction. We can then reduce the last vector_accumulator 4596 // after the loop, and also reduce the init value into it. 4597 // 4598 // We can not do this with all reductions. Some reductions do not allow the 4599 // reordering of operations (for example float addition/multiplication require 4600 // strict order). 4601 void PhaseIdealLoop::move_unordered_reduction_out_of_loop(IdealLoopTree* loop) { 4602 assert(!C->major_progress() && loop->is_counted() && loop->is_innermost(), "sanity"); 4603 4604 // Find all Phi nodes with an unordered Reduction on backedge. 4605 CountedLoopNode* cl = loop->_head->as_CountedLoop(); 4606 for (DUIterator_Fast jmax, j = cl->fast_outs(jmax); j < jmax; j++) { 4607 Node* phi = cl->fast_out(j); 4608 // We have a phi with a single use, and an unordered Reduction on the backedge. 4609 if (!phi->is_Phi() || phi->outcnt() != 1 || !is_unordered_reduction(phi->in(2))) { 4610 continue; 4611 } 4612 4613 ReductionNode* last_ur = phi->in(2)->as_Reduction(); 4614 assert(!last_ur->requires_strict_order(), "must be"); 4615 4616 // Determine types 4617 const TypeVect* vec_t = last_ur->vect_type(); 4618 uint vector_length = vec_t->length(); 4619 BasicType bt = vec_t->element_basic_type(); 4620 4621 // Convert opcode from vector-reduction -> scalar -> normal-vector-op 4622 const int sopc = VectorNode::scalar_opcode(last_ur->Opcode(), bt); 4623 const int vopc = VectorNode::opcode(sopc, bt); 4624 if (!Matcher::match_rule_supported_vector(vopc, vector_length, bt)) { 4625 DEBUG_ONLY( last_ur->dump(); ) 4626 assert(false, "do not have normal vector op for this reduction"); 4627 continue; // not implemented -> fails 4628 } 4629 4630 // Traverse up the chain of unordered Reductions, checking that it loops back to 4631 // the phi. Check that all unordered Reductions only have a single use, except for 4632 // the last (last_ur), which only has phi as a use in the loop, and all other uses 4633 // are outside the loop. 4634 ReductionNode* current = last_ur; 4635 ReductionNode* first_ur = nullptr; 4636 while (true) { 4637 assert(!current->requires_strict_order(), "sanity"); 4638 4639 // Expect no ctrl and a vector_input from within the loop. 4640 Node* ctrl = current->in(0); 4641 Node* vector_input = current->in(2); 4642 if (ctrl != nullptr || get_ctrl(vector_input) != cl) { 4643 DEBUG_ONLY( current->dump(1); ) 4644 assert(false, "reduction has ctrl or bad vector_input"); 4645 break; // Chain traversal fails. 4646 } 4647 4648 assert(current->vect_type() != nullptr, "must have vector type"); 4649 if (current->vect_type() != last_ur->vect_type()) { 4650 // Reductions do not have the same vector type (length and element type). 4651 break; // Chain traversal fails. 
4652 } 4653 4654 // Expect single use of an unordered Reduction, except for last_ur. 4655 if (current == last_ur) { 4656 // Expect all uses to be outside the loop, except phi. 4657 for (DUIterator_Fast kmax, k = current->fast_outs(kmax); k < kmax; k++) { 4658 Node* use = current->fast_out(k); 4659 if (use != phi && ctrl_or_self(use) == cl) { 4660 DEBUG_ONLY( current->dump(-1); ) 4661 assert(false, "reduction has use inside loop"); 4662 // Should not be allowed by SuperWord::mark_reductions 4663 return; // bail out of optimization 4664 } 4665 } 4666 } else { 4667 if (current->outcnt() != 1) { 4668 break; // Chain traversal fails. 4669 } 4670 } 4671 4672 // Expect another unordered Reduction or phi as the scalar input. 4673 Node* scalar_input = current->in(1); 4674 if (is_unordered_reduction(scalar_input) && 4675 scalar_input->Opcode() == current->Opcode()) { 4676 // Move up the unordered Reduction chain. 4677 current = scalar_input->as_Reduction(); 4678 assert(!current->requires_strict_order(), "must be"); 4679 } else if (scalar_input == phi) { 4680 // Chain terminates at phi. 4681 first_ur = current; 4682 current = nullptr; 4683 break; // Success. 4684 } else { 4685 // scalar_input is neither phi nor a matching reduction 4686 // Can for example be scalar reduction when we have 4687 // partial vectorization. 4688 break; // Chain traversal fails. 4689 } 4690 } 4691 if (current != nullptr) { 4692 // Chain traversal was not successful. 4693 continue; 4694 } 4695 assert(first_ur != nullptr, "must have successfully terminated chain traversal"); 4696 4697 Node* identity_scalar = ReductionNode::make_identity_con_scalar(_igvn, sopc, bt); 4698 set_root_as_ctrl(identity_scalar); 4699 VectorNode* identity_vector = VectorNode::scalar2vector(identity_scalar, vector_length, bt); 4700 register_new_node(identity_vector, C->root()); 4701 assert(vec_t == identity_vector->vect_type(), "matching vector type"); 4702 VectorNode::trace_new_vector(identity_vector, "Unordered Reduction"); 4703 4704 // Turn the scalar phi into a vector phi. 4705 _igvn.rehash_node_delayed(phi); 4706 Node* init = phi->in(1); // Remember init before replacing it. 4707 phi->set_req_X(1, identity_vector, &_igvn); 4708 phi->as_Type()->set_type(vec_t); 4709 _igvn.set_type(phi, vec_t); 4710 4711 // Traverse down the chain of unordered Reductions, and replace them with vector_accumulators. 4712 current = first_ur; 4713 while (true) { 4714 // Create vector_accumulator to replace current. 4715 Node* last_vector_accumulator = current->in(1); 4716 Node* vector_input = current->in(2); 4717 VectorNode* vector_accumulator = VectorNode::make(vopc, last_vector_accumulator, vector_input, vec_t); 4718 register_new_node(vector_accumulator, cl); 4719 _igvn.replace_node(current, vector_accumulator); 4720 VectorNode::trace_new_vector(vector_accumulator, "Unordered Reduction"); 4721 if (current == last_ur) { 4722 break; 4723 } 4724 current = vector_accumulator->unique_out()->as_Reduction(); 4725 assert(!current->requires_strict_order(), "must be"); 4726 } 4727 4728 // Create post-loop reduction. 4729 Node* last_accumulator = phi->in(2); 4730 Node* post_loop_reduction = ReductionNode::make(sopc, nullptr, init, last_accumulator, bt); 4731 4732 // Take over uses of last_accumulator that are not in the loop. 
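// (replace_edge below removes "use" from last_accumulator's out array, so the iteration index is stepped back
// with --i to revisit the slot that the next use slides into.)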
4733 for (DUIterator i = last_accumulator->outs(); last_accumulator->has_out(i); i++) { 4734 Node* use = last_accumulator->out(i); 4735 if (use != phi && use != post_loop_reduction) { 4736 assert(ctrl_or_self(use) != cl, "use must be outside loop"); 4737 use->replace_edge(last_accumulator, post_loop_reduction, &_igvn); 4738 --i; 4739 } 4740 } 4741 register_new_node(post_loop_reduction, get_late_ctrl(post_loop_reduction, cl)); 4742 VectorNode::trace_new_vector(post_loop_reduction, "Unordered Reduction"); 4743 4744 assert(last_accumulator->outcnt() == 2, "last_accumulator has 2 uses: phi and post_loop_reduction"); 4745 assert(post_loop_reduction->outcnt() > 0, "should have taken over all non loop uses of last_accumulator"); 4746 assert(phi->outcnt() == 1, "accumulator is the only use of phi"); 4747 } 4748 } 4749 4750 void DataNodeGraph::clone_data_nodes(Node* new_ctrl) { 4751 for (uint i = 0; i < _data_nodes.size(); i++) { 4752 clone(_data_nodes[i], new_ctrl); 4753 } 4754 } 4755 4756 // Clone the given node and set it up properly. Set 'new_ctrl' as ctrl. 4757 void DataNodeGraph::clone(Node* node, Node* new_ctrl) { 4758 Node* clone = node->clone(); 4759 _phase->igvn().register_new_node_with_optimizer(clone); 4760 _orig_to_new.put(node, clone); 4761 _phase->set_ctrl(clone, new_ctrl); 4762 if (node->is_CastII()) { 4763 clone->set_req(0, new_ctrl); 4764 } 4765 } 4766 4767 // Rewire the data inputs of all (unprocessed) cloned nodes, whose inputs are still pointing to the same inputs as their 4768 // corresponding orig nodes, to the newly cloned inputs to create a separate cloned graph. 4769 void DataNodeGraph::rewire_clones_to_cloned_inputs() { 4770 _orig_to_new.iterate_all([&](Node* node, Node* clone) { 4771 for (uint i = 1; i < node->req(); i++) { 4772 Node** cloned_input = _orig_to_new.get(node->in(i)); 4773 if (cloned_input != nullptr) { 4774 // Input was also cloned -> rewire clone to the cloned input. 4775 _phase->igvn().replace_input_of(clone, i, *cloned_input); 4776 } 4777 } 4778 }); 4779 } 4780 4781 // Clone all non-OpaqueLoop* nodes and apply the provided transformation strategy for OpaqueLoop* nodes. 4782 // Set 'new_ctrl' as ctrl for all cloned non-OpaqueLoop* nodes. 4783 void DataNodeGraph::clone_data_nodes_and_transform_opaque_loop_nodes( 4784 const TransformStrategyForOpaqueLoopNodes& transform_strategy, 4785 Node* new_ctrl) { 4786 for (uint i = 0; i < _data_nodes.size(); i++) { 4787 Node* data_node = _data_nodes[i]; 4788 if (data_node->is_Opaque1()) { 4789 transform_opaque_node(transform_strategy, data_node); 4790 } else { 4791 clone(data_node, new_ctrl); 4792 } 4793 } 4794 } 4795 4796 void DataNodeGraph::transform_opaque_node(const TransformStrategyForOpaqueLoopNodes& transform_strategy, Node* node) { 4797 Node* transformed_node; 4798 if (node->is_OpaqueLoopInit()) { 4799 transformed_node = transform_strategy.transform_opaque_init(node->as_OpaqueLoopInit()); 4800 } else { 4801 assert(node->is_OpaqueLoopStride(), "must be OpaqueLoopStrideNode"); 4802 transformed_node = transform_strategy.transform_opaque_stride(node->as_OpaqueLoopStride()); 4803 } 4804 // Add an orig->new mapping to correctly update the inputs of the copied graph in rewire_clones_to_cloned_inputs(). 4805 _orig_to_new.put(node, transformed_node); 4806 }
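// Usage sketch for DataNodeGraph (assumed calling convention inferred from the methods above, not a verbatim call
// site; "graph", "strategy" and "new_ctrl" are placeholder names): a caller first clones the collected data nodes,
// either verbatim or with the OpaqueLoop* transformation strategy, and then rewires the clones among themselves so
// the copy becomes a self-contained subgraph:
//
//   graph.clone_data_nodes(new_ctrl);    // or clone_data_nodes_and_transform_opaque_loop_nodes(strategy, new_ctrl)
//   graph.rewire_clones_to_cloned_inputs();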