/*
 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/loopnode.hpp"
#include "opto/matcher.hpp"
#include "opto/movenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
#include "opto/subtypenode.hpp"
#include "opto/superword.hpp"
#include "opto/vectornode.hpp"
#include "utilities/macros.hpp"

//=============================================================================
//------------------------------split_thru_phi---------------------------------
// Split Node 'n' through merge point if there is enough win.
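// For example (illustrative sketch), with a two-input region R merging
// control paths P1 and P2:
//   n = AddI(Phi(R, a, b), c)
// can be rewritten as
//   Phi(R, AddI(a, c), AddI(b, c))
// Each per-path clone that constant folds or simplifies via Identity counts
// as a 'win'; the split is only kept if the number of wins exceeds 'policy'.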
Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  if ((n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) ||
      (n->Opcode() == Op_ConvL2I && n->bottom_type() != TypeInt::INT)) {
    // ConvI2L/ConvL2I may have type information on it which is unsafe to push up,
    // so disable this for now
    return nullptr;
  }

  // Splitting range check CastIIs through a loop induction Phi can
  // cause new Phis to be created that are left unrelated to the loop
  // induction Phi and prevent optimizations (vectorization)
  if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
      n->in(1) == region->as_CountedLoop()->phi()) {
    return nullptr;
  }

  // Inline types should not be split through Phis because they cannot be merged
  // through Phi nodes; instead, each value input needs to be merged individually.
  if (n->is_InlineType()) {
    return nullptr;
  }

  if (cannot_split_division(n, region)) {
    return nullptr;
  }

  int wins = 0;
  assert(!n->is_CFG(), "");
  assert(region->is_Region(), "");

  const Type* type = n->bottom_type();
  const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
  Node* phi;
  if (t_oop != nullptr && t_oop->is_known_instance_field()) {
    int iid    = t_oop->instance_id();
    int index  = C->get_alias_index(t_oop);
    int offset = t_oop->offset();
    phi = new PhiNode(region, type, nullptr, iid, index, offset);
  } else {
    phi = PhiNode::make_blank(region, n);
  }
  uint old_unique = C->unique();
  for (uint i = 1; i < region->req(); i++) {
    Node* x;
    Node* the_clone = nullptr;
    if (region->in(i) == C->top()) {
      x = C->top();             // Dead path?  Use a dead data op
    } else {
      x = n->clone();           // Else clone up the data op
      the_clone = x;            // Remember for possible deletion.
      // Alter data node to use pre-phi inputs
      if (n->in(0) == region)
        x->set_req( 0, region->in(i) );
      for (uint j = 1; j < n->req(); j++) {
        Node* in = n->in(j);
        if (in->is_Phi() && in->in(0) == region)
          x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
      }
    }
    // Check for a 'win' on some paths
    const Type* t = x->Value(&_igvn);

    bool singleton = t->singleton();

    // A TOP singleton indicates that there are no possible values incoming
    // along a particular edge. In most cases, this is OK, and the Phi will
    // be eliminated later in an Ideal call. However, we can't allow this to
    // happen if the singleton occurs on loop entry, as the elimination of
    // the PhiNode may cause the resulting node to migrate back to a previous
    // loop iteration.
    if (singleton && t == Type::TOP) {
      // Is_Loop() == false does not confirm the absence of a loop (e.g., an
      // irreducible loop may not be indicated by an affirmative is_Loop());
      // therefore, the only top we can split thru a phi is on a backedge of
      // a loop.
      singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
    }

    if (singleton) {
      wins++;
      x = makecon(t);
    } else {
      // We now call Identity to try to simplify the cloned node.
      // Note that some Identity methods call phase->type(this).
      // Make sure that the type array is big enough for
      // our new node, even though we may throw the node away.
      // (Note: This tweaking with igvn only works because x is a new node.)
      _igvn.set_type(x, t);
      // If x is a TypeNode, capture any more-precise type permanently into Node,
      // otherwise it will not be updated during igvn->transform since
      // igvn->type(x) is set to x->Value() already.
      x->raise_bottom_type(t);
      Node* y = x->Identity(&_igvn);
      if (y != x) {
        wins++;
        x = y;
      } else {
        y = _igvn.hash_find(x);
        if (y == nullptr) {
          y = similar_subtype_check(x, region->in(i));
        }
        if (y) {
          wins++;
          x = y;
        } else {
          // Else x is a new node we are keeping
          // We do not need register_new_node_with_optimizer
          // because set_type has already been called.
          _igvn._worklist.push(x);
        }
      }
    }

    phi->set_req( i, x );

    if (the_clone == nullptr) {
      continue;
    }

    if (the_clone != x) {
      _igvn.remove_dead_node(the_clone);
    } else if (region->is_Loop() && i == LoopNode::LoopBackControl &&
               n->is_Load() && can_move_to_inner_loop(n, region->as_Loop(), x)) {
      // It is not a win if 'x' moved from an outer to an inner loop.
      // This edge case can only happen for Load nodes.
      wins = 0;
      break;
    }
  }
  // Too few wins?
  if (wins <= policy) {
    _igvn.remove_dead_node(phi);
    return nullptr;
  }

  // Record Phi
  register_new_node( phi, region );

  for (uint i2 = 1; i2 < phi->req(); i2++) {
    Node *x = phi->in(i2);
    // If we commoned up the cloned 'x' with another existing Node,
    // the existing Node picks up a new use.  We need to make the
    // existing Node occur higher up so it dominates its uses.
    Node *old_ctrl;
    IdealLoopTree *old_loop;

    if (x->is_Con()) {
      assert(get_ctrl(x) == C->root(), "constant control is not root");
      continue;
    }
    // The occasional new node
    if (x->_idx >= old_unique) {     // Found a new, unplaced node?
      old_ctrl = nullptr;
      old_loop = nullptr;            // Not in any prior loop
    } else {
      old_ctrl = get_ctrl(x);
      old_loop = get_loop(old_ctrl); // Get prior loop
    }
    // New late point must dominate new use
    Node *new_ctrl = dom_lca(old_ctrl, region->in(i2));
    if (new_ctrl == old_ctrl) // Nothing is changed
      continue;

    IdealLoopTree *new_loop = get_loop(new_ctrl);

    // Don't move x into a loop if its uses are
    // outside of loop. Otherwise x will be cloned
    // for each use outside of this loop.
    IdealLoopTree *use_loop = get_loop(region);
    if (!new_loop->is_member(use_loop) &&
        (old_loop == nullptr || !new_loop->is_member(old_loop))) {
      // Take early control, later control will be recalculated
      // during next iteration of loop optimizations.
      new_ctrl = get_early_ctrl(x);
      new_loop = get_loop(new_ctrl);
    }
    // Set new location
    set_ctrl(x, new_ctrl);
    // If changing loop bodies, see if we need to collect into new body
    if (old_loop != new_loop) {
      if (old_loop && !old_loop->_child)
        old_loop->_body.yank(x);
      if (!new_loop->_child)
        new_loop->_body.push(x); // Collect body info
    }
  }

  return phi;
}

// Test whether node 'x' can move into an inner loop relative to node 'n'.
// Note: The test is not exact. It returns true if 'x' COULD end up in an
// inner loop, but it may also return true when 'x' stays in the outer loop.
bool PhaseIdealLoop::can_move_to_inner_loop(Node* n, LoopNode* n_loop, Node* x) {
  IdealLoopTree* n_loop_tree = get_loop(n_loop);
  IdealLoopTree* x_loop_tree = get_loop(get_early_ctrl(x));
  // x_loop_tree should be outer or same loop as n_loop_tree
  return !x_loop_tree->is_member(n_loop_tree);
}

// Subtype checks that carry profile data don't common up, so look for a
// replacement by following edges
Node* PhaseIdealLoop::similar_subtype_check(const Node* x, Node* r_in) {
  if (x->is_SubTypeCheck()) {
    Node* in1 = x->in(1);
    for (DUIterator_Fast imax, i = in1->fast_outs(imax); i < imax; i++) {
      Node* u = in1->fast_out(i);
      if (u != x && u->is_SubTypeCheck() && u->in(1) == x->in(1) && u->in(2) == x->in(2)) {
        for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
          Node* bol = u->fast_out(j);
          for (DUIterator_Fast kmax, k = bol->fast_outs(kmax); k < kmax; k++) {
            Node* iff = bol->fast_out(k);
            // Only dominating subtype checks are interesting: otherwise we risk
            // replacing a subtype check by another with unrelated profile
            if (iff->is_If() && is_dominator(iff, r_in)) {
              return u;
            }
          }
        }
      }
    }
  }
  return nullptr;
}

// Return true if 'n' is a Div or Mod node (without a zero check If node, which was removed earlier) with a loop phi divisor
// of a trip-counted (integer or long) loop with a backedge input that could be zero (i.e. its type range includes zero). In
// this case, we cannot split the division to the backedge as it could freely float above the loop exit check, resulting in
// a division by zero. This situation is possible because the type of an increment node of an iv phi (trip-counter) could
// include zero while the iv phi does not (see PhiNode::Value() for trip-counted loops where we improve types of iv phis).
// We also need to check other loop phis as they could have been created in the same split-if pass when applying
// PhaseIdealLoop::split_thru_phi() to split nodes through an iv phi.
bool PhaseIdealLoop::cannot_split_division(const Node* n, const Node* region) const {
  const Type* zero;
  switch (n->Opcode()) {
    case Op_DivI:
    case Op_ModI:
    case Op_UDivI:
    case Op_UModI:
      zero = TypeInt::ZERO;
      break;
    case Op_DivL:
    case Op_ModL:
    case Op_UDivL:
    case Op_UModL:
      zero = TypeLong::ZERO;
      break;
    default:
      return false;
  }

  if (n->in(0) != nullptr) {
    // Cannot split through phi if Div or Mod node has a control dependency to a zero check.
    return true;
  }

  Node* divisor = n->in(2);
  return is_divisor_loop_phi(divisor, region) &&
         loop_phi_backedge_type_contains_zero(divisor, zero);
}

bool PhaseIdealLoop::is_divisor_loop_phi(const Node* divisor, const Node* loop) {
  return loop->is_Loop() && divisor->is_Phi() && divisor->in(0) == loop;
}

bool PhaseIdealLoop::loop_phi_backedge_type_contains_zero(const Node* phi_divisor, const Type* zero) const {
  return _igvn.type(phi_divisor->in(LoopNode::LoopBackControl))->filter_speculative(zero) != Type::TOP;
}

//------------------------------dominated_by------------------------------------
// Replace the dominated test with an obvious true or false.  Place it on the
// IGVN worklist for later cleanup.  Move control-dependent data Nodes on the
// live path up to the dominating control.
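// For example (illustrative sketch), when the same test is checked twice:
//   if (i < n) {                      // dominating test
//     ...
//     if (i < n) { A; } else { B; }   // dominated test: always takes A
//   }
// the dominated If's condition is replaced by the constant 1 (or 0 when
// 'flip' is set), letting IGVN fold the now-useless diamond away.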
void PhaseIdealLoop::dominated_by(IfProjNode* prevdom, IfNode* iff, bool flip, bool pin_array_access_nodes) {
  if (VerifyLoopOptimizations && PrintOpto) { tty->print_cr("dominating test"); }

  // prevdom is the dominating projection of the dominating test.
  assert(iff->Opcode() == Op_If ||
         iff->Opcode() == Op_CountedLoopEnd ||
         iff->Opcode() == Op_LongCountedLoopEnd ||
         iff->Opcode() == Op_RangeCheck ||
         iff->Opcode() == Op_ParsePredicate,
         "Check this code when new subtype is added");

  int pop = prevdom->Opcode();
  assert( pop == Op_IfFalse || pop == Op_IfTrue, "" );
  if (flip) {
    if (pop == Op_IfTrue)
      pop = Op_IfFalse;
    else
      pop = Op_IfTrue;
  }
  // 'con' is set to true or false to kill the dominated test.
  Node* con = makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
  // Hack the dominated test
  _igvn.replace_input_of(iff, 1, con);

  // If I don't have a reachable TRUE and FALSE path following the IfNode then
  // I can assume this path reaches an infinite loop.  In this case it's not
  // important to optimize the data Nodes - either the whole compilation will
  // be tossed or this path (and all data Nodes) will go dead.
  if (iff->outcnt() != 2) {
    return;
  }

  // Make control-dependent data Nodes on the live path (path that will remain
  // once the dominated IF is removed) become control-dependent on the
  // dominating projection.
  Node* dp = iff->proj_out_or_null(pop == Op_IfTrue);

  if (dp == nullptr) {
    return;
  }

  rewire_safe_outputs_to_dominator(dp, prevdom, pin_array_access_nodes);
}

void PhaseIdealLoop::rewire_safe_outputs_to_dominator(Node* source, Node* dominator, const bool pin_array_access_nodes) {
  IdealLoopTree* old_loop = get_loop(source);

  for (DUIterator_Fast imax, i = source->fast_outs(imax); i < imax; i++) {
    Node* out = source->fast_out(i); // Control-dependent node
    // Do not rewire Div and Mod nodes which could have a zero divisor to avoid skipping their zero check.
    if (out->depends_only_on_test() && _igvn.no_dependent_zero_check(out)) {
      assert(out->in(0) == source, "must be control dependent on source");
      _igvn.replace_input_of(out, 0, dominator);
      if (pin_array_access_nodes) {
        // Because of Loop Predication, Loads and range check Cast nodes that are control dependent on this range
        // check (that is about to be removed) now depend on multiple dominating Hoisted Check Predicates. After the
        // removal of this range check, these control dependent nodes end up at the lowest/nearest dominating predicate
        // in the graph. To ensure that these Loads/Casts do not float above any of the dominating checks (even when the
        // lowest dominating check is later replaced by yet another dominating check), we need to pin them at the lowest
        // dominating check.
        Node* clone = out->pin_array_access_node();
        if (clone != nullptr) {
          clone = _igvn.register_new_node_with_optimizer(clone, out);
          _igvn.replace_node(out, clone);
          out = clone;
        }
      }
      set_early_ctrl(out, false);
      IdealLoopTree* new_loop = get_loop(get_ctrl(out));
      if (old_loop != new_loop) {
        if (!old_loop->_child) {
          old_loop->_body.yank(out);
        }
        if (!new_loop->_child) {
          new_loop->_body.push(out);
        }
      }
      --i;
      --imax;
    }
  }
}

//------------------------------has_local_phi_input----------------------------
// Return TRUE if 'n' has Phi inputs from its local block and no other
// block-local inputs (all non-local-phi inputs come from earlier blocks)
Node *PhaseIdealLoop::has_local_phi_input( Node *n ) {
  Node *n_ctrl = get_ctrl(n);
  // See if some inputs come from a Phi in this block, or from before
  // this block.
  uint i;
  for( i = 1; i < n->req(); i++ ) {
    Node *phi = n->in(i);
    if( phi->is_Phi() && phi->in(0) == n_ctrl )
      break;
  }
  if( i >= n->req() )
    return nullptr;                // No Phi inputs; nowhere to clone thru

  // Check for inputs created between 'n' and the Phi input.  These
  // must split as well; they have already been given the chance
  // (courtesy of a post-order visit) and since they did not we must
  // recover the 'cost' of splitting them by being very profitable
  // when splitting 'n'.  Since this is unlikely we simply give up.
  for( i = 1; i < n->req(); i++ ) {
    Node *m = n->in(i);
    if( get_ctrl(m) == n_ctrl && !m->is_Phi() ) {
      // We allow the special case of AddP's with no local inputs.
      // This allows us to split-up address expressions.
      if (m->is_AddP() &&
          get_ctrl(m->in(AddPNode::Base)) != n_ctrl &&
          get_ctrl(m->in(AddPNode::Address)) != n_ctrl &&
          get_ctrl(m->in(AddPNode::Offset)) != n_ctrl) {
        // Move the AddP up to the dominating point. That's fine because control of m's inputs
        // must dominate get_ctrl(m) == n_ctrl and we just checked that the input controls are != n_ctrl.
        Node* c = find_non_split_ctrl(idom(n_ctrl));
        if (c->is_OuterStripMinedLoop()) {
          c->as_Loop()->verify_strip_mined(1);
          c = c->in(LoopNode::EntryControl);
        }
        set_ctrl_and_loop(m, c);
        continue;
      }
      return nullptr;
    }
    assert(n->is_Phi() || m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control");
  }

  return n_ctrl;
}

// Replace expressions like ((V+I) << 2) with (V<<2 + I<<2).
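// For example (illustrative sketch), with loop-varying V and loop-invariant
// 'inv', an index expression
//   (V + inv) << 2
// is reshaped into
//   (V << 2) + (inv << 2)
// so the invariant part (inv << 2) can be computed once outside the loop.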
Node* PhaseIdealLoop::remix_address_expressions_add_left_shift(Node* n, IdealLoopTree* n_loop, Node* n_ctrl, BasicType bt) {
  assert(bt == T_INT || bt == T_LONG, "only for integers");
  int n_op = n->Opcode();

  if (n_op == Op_LShift(bt)) {
    // Scale is loop invariant
    Node* scale = n->in(2);
    Node* scale_ctrl = get_ctrl(scale);
    IdealLoopTree* scale_loop = get_loop(scale_ctrl);
    if (n_loop == scale_loop || !scale_loop->is_member(n_loop)) {
      return nullptr;
    }
    const TypeInt* scale_t = scale->bottom_type()->isa_int();
    if (scale_t != nullptr && scale_t->is_con() && scale_t->get_con() >= 16) {
      return nullptr;              // Don't bother with byte/short masking
    }
    // Add must vary with loop (else shift would be loop-invariant)
    Node* add = n->in(1);
    Node* add_ctrl = get_ctrl(add);
    IdealLoopTree* add_loop = get_loop(add_ctrl);
    if (n_loop != add_loop) {
      return nullptr;  // happens w/ evil ZKM loops
    }

    // Convert I-V into I+ (0-V); same for V-I
    if (add->Opcode() == Op_Sub(bt) &&
        _igvn.type(add->in(1)) != TypeInteger::zero(bt)) {
      assert(add->Opcode() == Op_SubI || add->Opcode() == Op_SubL, "");
      Node* zero = integercon(0, bt);
      Node* neg = SubNode::make(zero, add->in(2), bt);
      register_new_node_with_ctrl_of(neg, add->in(2));
      add = AddNode::make(add->in(1), neg, bt);
      register_new_node(add, add_ctrl);
    }
    if (add->Opcode() != Op_Add(bt)) return nullptr;
    assert(add->Opcode() == Op_AddI || add->Opcode() == Op_AddL, "");
    // See if one add input is loop invariant
    Node* add_var = add->in(1);
    Node* add_var_ctrl = get_ctrl(add_var);
    IdealLoopTree* add_var_loop = get_loop(add_var_ctrl);
    Node* add_invar = add->in(2);
    Node* add_invar_ctrl = get_ctrl(add_invar);
    IdealLoopTree* add_invar_loop = get_loop(add_invar_ctrl);
    if (add_invar_loop == n_loop) {
      // Swap to find the invariant part
      add_invar = add_var;
      add_invar_ctrl = add_var_ctrl;
      add_invar_loop = add_var_loop;
      add_var = add->in(2);
    } else if (add_var_loop != n_loop) { // Else neither input is loop invariant
      return nullptr;
    }
    if (n_loop == add_invar_loop || !add_invar_loop->is_member(n_loop)) {
      return nullptr;              // No invariant part of the add?
    }

    // Yes!  Reshape address expression!
    Node* inv_scale = LShiftNode::make(add_invar, scale, bt);
    Node* inv_scale_ctrl =
            dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
            add_invar_ctrl : scale_ctrl;
    register_new_node(inv_scale, inv_scale_ctrl);
    Node* var_scale = LShiftNode::make(add_var, scale, bt);
    register_new_node(var_scale, n_ctrl);
    Node* var_add = AddNode::make(var_scale, inv_scale, bt);
    register_new_node(var_add, n_ctrl);
    _igvn.replace_node(n, var_add);
    return var_add;
  }
  return nullptr;
}

//------------------------------remix_address_expressions----------------------
// Rework addressing expressions to get the most loop-invariant stuff
// moved out.  We'd like to do all associative operators, but it's especially
// important (common) to do address expressions.
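// For example (illustrative sketch), an access like a[i + inv] computes its
// address as an AddP chain mixing the invariant part and the loop-varying
// part; reassociating the chain so the invariant AddP is grouped together
// lets it be placed once in the loop preheader, leaving only the
// loop-varying AddP inside the loop.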
Node* PhaseIdealLoop::remix_address_expressions(Node* n) {
  if (!has_ctrl(n)) return nullptr;
  Node* n_ctrl = get_ctrl(n);
  IdealLoopTree* n_loop = get_loop(n_ctrl);

  // See if 'n' mixes loop-varying and loop-invariant inputs and
  // itself is loop-varying.

  // Only interested in binary ops (and AddP)
  if (n->req() < 3 || n->req() > 4) return nullptr;

  Node* n1_ctrl = get_ctrl(n->in( 1));
  Node* n2_ctrl = get_ctrl(n->in( 2));
  Node* n3_ctrl = get_ctrl(n->in(n->req() == 3 ? 2 : 3));
  IdealLoopTree* n1_loop = get_loop(n1_ctrl);
  IdealLoopTree* n2_loop = get_loop(n2_ctrl);
  IdealLoopTree* n3_loop = get_loop(n3_ctrl);

  // Does one of my inputs spin in a tighter loop than self?
  if ((n_loop->is_member(n1_loop) && n_loop != n1_loop) ||
      (n_loop->is_member(n2_loop) && n_loop != n2_loop) ||
      (n_loop->is_member(n3_loop) && n_loop != n3_loop)) {
    return nullptr;                // Leave well enough alone
  }

  // Is at least one of my inputs loop-invariant?
  if (n1_loop == n_loop &&
      n2_loop == n_loop &&
      n3_loop == n_loop) {
    return nullptr;                // No loop-invariant inputs
  }

  Node* res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_INT);
  if (res != nullptr) {
    return res;
  }
  res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_LONG);
  if (res != nullptr) {
    return res;
  }

  int n_op = n->Opcode();
  // Replace (I+V) with (V+I)
  if (n_op == Op_AddI ||
      n_op == Op_AddL ||
      n_op == Op_AddF ||
      n_op == Op_AddD ||
      n_op == Op_MulI ||
      n_op == Op_MulL ||
      n_op == Op_MulF ||
      n_op == Op_MulD) {
    if (n2_loop == n_loop) {
      assert(n1_loop != n_loop, "");
      n->swap_edges(1, 2);
    }
  }

  // Replace ((I1 +p V) +p I2) with ((I1 +p I2) +p V),
  // but not if I2 is a constant. Skip for irreducible loops.
  if (n_op == Op_AddP && n_loop->_head->is_Loop()) {
    if (n2_loop == n_loop && n3_loop != n_loop) {
      if (n->in(2)->Opcode() == Op_AddP && !n->in(3)->is_Con()) {
        Node* n22_ctrl = get_ctrl(n->in(2)->in(2));
        Node* n23_ctrl = get_ctrl(n->in(2)->in(3));
        IdealLoopTree* n22loop = get_loop(n22_ctrl);
        IdealLoopTree* n23_loop = get_loop(n23_ctrl);
        if (n22loop != n_loop && n22loop->is_member(n_loop) &&
            n23_loop == n_loop) {
          Node* add1 = new AddPNode(n->in(1), n->in(2)->in(2), n->in(3));
          // Stuff new AddP in the loop preheader
          register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl));
          Node* add2 = new AddPNode(n->in(1), add1, n->in(2)->in(3));
          register_new_node(add2, n_ctrl);
          _igvn.replace_node(n, add2);
          return add2;
        }
      }
    }

    // Replace (I1 +p (I2 + V)) with ((I1 +p I2) +p V)
    if (n2_loop != n_loop && n3_loop == n_loop) {
      if (n->in(3)->Opcode() == Op_AddX) {
        Node* V = n->in(3)->in(1);
        Node* I = n->in(3)->in(2);
        if (is_member(n_loop,get_ctrl(V))) {
        } else {
          Node *tmp = V; V = I; I = tmp;
        }
        if (!is_member(n_loop,get_ctrl(I))) {
          Node* add1 = new AddPNode(n->in(1), n->in(2), I);
          // Stuff new AddP in the loop preheader
          register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl));
          Node* add2 = new AddPNode(n->in(1), add1, V);
          register_new_node(add2, n_ctrl);
          _igvn.replace_node(n, add2);
          return add2;
        }
      }
    }
  }

  return nullptr;
}

// Optimize ((in1[2*i] * in2[2*i]) + (in1[2*i+1] * in2[2*i+1]))
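// For example (illustrative sketch), a short-array dot product of the form
//   short[] in1, in2; int sum = 0;
//   for (int i = 0; i < n; i += 2) {
//     sum += in1[i] * in2[i] + in1[i+1] * in2[i+1];
//   }
// matches this pattern; the AddI is replaced by a MulAddS2I so SuperWord can
// later vectorize it with MulAddVS2VI.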
Node *PhaseIdealLoop::convert_add_to_muladd(Node* n) {
  assert(n->Opcode() == Op_AddI, "sanity");
  Node * nn = nullptr;
  Node * in1 = n->in(1);
  Node * in2 = n->in(2);
  if (in1->Opcode() == Op_MulI && in2->Opcode() == Op_MulI) {
    IdealLoopTree* loop_n = get_loop(get_ctrl(n));
    if (loop_n->is_counted() &&
        loop_n->_head->as_Loop()->is_valid_counted_loop(T_INT) &&
        Matcher::match_rule_supported(Op_MulAddVS2VI) &&
        Matcher::match_rule_supported(Op_MulAddS2I)) {
      Node* mul_in1 = in1->in(1);
      Node* mul_in2 = in1->in(2);
      Node* mul_in3 = in2->in(1);
      Node* mul_in4 = in2->in(2);
      if (mul_in1->Opcode() == Op_LoadS &&
          mul_in2->Opcode() == Op_LoadS &&
          mul_in3->Opcode() == Op_LoadS &&
          mul_in4->Opcode() == Op_LoadS) {
        IdealLoopTree* loop1 = get_loop(get_ctrl(mul_in1));
        IdealLoopTree* loop2 = get_loop(get_ctrl(mul_in2));
        IdealLoopTree* loop3 = get_loop(get_ctrl(mul_in3));
        IdealLoopTree* loop4 = get_loop(get_ctrl(mul_in4));
        IdealLoopTree* loop5 = get_loop(get_ctrl(in1));
        IdealLoopTree* loop6 = get_loop(get_ctrl(in2));
        // All nodes should be in the same counted loop.
        if (loop_n == loop1 && loop_n == loop2 && loop_n == loop3 &&
            loop_n == loop4 && loop_n == loop5 && loop_n == loop6) {
          Node* adr1 = mul_in1->in(MemNode::Address);
          Node* adr2 = mul_in2->in(MemNode::Address);
          Node* adr3 = mul_in3->in(MemNode::Address);
          Node* adr4 = mul_in4->in(MemNode::Address);
          if (adr1->is_AddP() && adr2->is_AddP() && adr3->is_AddP() && adr4->is_AddP()) {
            if ((adr1->in(AddPNode::Base) == adr3->in(AddPNode::Base)) &&
                (adr2->in(AddPNode::Base) == adr4->in(AddPNode::Base))) {
              nn = new MulAddS2INode(mul_in1, mul_in2, mul_in3, mul_in4);
              register_new_node_with_ctrl_of(nn, n);
              _igvn.replace_node(n, nn);
              return nn;
            } else if ((adr1->in(AddPNode::Base) == adr4->in(AddPNode::Base)) &&
                       (adr2->in(AddPNode::Base) == adr3->in(AddPNode::Base))) {
              nn = new MulAddS2INode(mul_in1, mul_in2, mul_in4, mul_in3);
              register_new_node_with_ctrl_of(nn, n);
              _igvn.replace_node(n, nn);
              return nn;
            }
          }
        }
      }
    }
  }
  return nn;
}

//------------------------------conditional_move-------------------------------
// Attempt to replace a Phi with a conditional move.  We have some pretty
// strict profitability requirements.  All Phis at the merge point must
// be converted, so we can remove the control flow.  We need to limit the
// number of c-moves to a small handful.  All code that was in the side-arms
// of the CFG diamond is now speculatively executed.  This code has to be
// "cheap enough".  We are pretty much limited to CFG diamonds that merge
// 1 or 2 items with a total of 1 or 2 ops executed speculatively.
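// For example (illustrative sketch), the diamond from
//   x = (a < b) ? c : d;
// reaches this code as
//   x = Phi(Region(IfTrue(If(Bool(CmpI(a, b)))), IfFalse(...)), c, d)
// and, when profitable, the Phi is replaced by a CMoveI driven by the same
// Bool, selecting between c and d; this removes the branch at the cost of
// evaluating both arms speculatively.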
Node *PhaseIdealLoop::conditional_move( Node *region ) {

  assert(region->is_Region(), "sanity check");
  if (region->req() != 3) return nullptr;

  // Check for CFG diamond
  Node *lp = region->in(1);
  Node *rp = region->in(2);
  if (!lp || !rp) return nullptr;
  Node *lp_c = lp->in(0);
  if (lp_c == nullptr || lp_c != rp->in(0) || !lp_c->is_If()) return nullptr;
  IfNode *iff = lp_c->as_If();

  // Check for ops pinned in an arm of the diamond.
  // Can't remove the control flow in this case
  if (lp->outcnt() > 1) return nullptr;
  if (rp->outcnt() > 1) return nullptr;

  IdealLoopTree* r_loop = get_loop(region);
  assert(r_loop == get_loop(iff), "sanity");
  // Always convert to CMOVE if all results are used only outside this loop.
  bool used_inside_loop = (r_loop == _ltree_root);

  // Check profitability
  int cost = 0;
  int phis = 0;
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    Node *out = region->fast_out(i);
    if (!out->is_Phi()) continue; // Ignore other control edges, etc
    phis++;
    PhiNode* phi = out->as_Phi();
    BasicType bt = phi->type()->basic_type();
    switch (bt) {
      case T_DOUBLE:
      case T_FLOAT:
        if (C->use_cmove()) {
          continue; //TODO: maybe we want to add some cost
        }
        cost += Matcher::float_cmove_cost(); // Could be very expensive
        break;
      case T_LONG: {
        cost += Matcher::long_cmove_cost(); // May encode as 2 CMOVs
      } // Fall through
      case T_INT:                 // These all CMOV fine
      case T_ADDRESS: {           // (RawPtr)
        cost++;
        break;
      }
      case T_NARROWOOP: // Fall through
      case T_OBJECT: {            // Base oops are OK, but not derived oops
        const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
        // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
        // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
        // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
        // have a Phi for the base here that we convert to a CMOVE all is well
        // and good.  But if the base is dead, we'll not make a CMOVE.  Later
        // the allocator will have to produce a base by creating a CMOVE of the
        // relevant bases.  This puts the allocator in the business of
        // manufacturing expensive instructions, generally a bad plan.
        // Just Say No to Conditionally-Moved Derived Pointers.
        if (tp && tp->offset() != 0)
          return nullptr;
        cost++;
        break;
      }
      default:
        return nullptr;           // In particular, can't do memory or I/O
    }
    // Add in cost any speculative ops
    for (uint j = 1; j < region->req(); j++) {
      Node *proj = region->in(j);
      Node *inp = phi->in(j);
      if (inp->isa_InlineType()) {
        // TODO 8302217 This prevents PhiNode::push_inline_types_through
        return nullptr;
      }
      if (get_ctrl(inp) == proj) { // Found local op
        cost++;
        // Check for a chain of dependent ops; these will all become
        // speculative in a CMOV.
        for (uint k = 1; k < inp->req(); k++)
          if (get_ctrl(inp->in(k)) == proj)
            cost += ConditionalMoveLimit; // Too much speculative goo
      }
    }
    // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
    // This will likely Split-If, a higher-payoff operation.
    for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
      Node* use = phi->fast_out(k);
      if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
        cost += ConditionalMoveLimit;
      // Is there a use inside the loop?
      // Note: check only basic types since CMoveP is pinned.
      if (!used_inside_loop && is_java_primitive(bt)) {
        IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
        if (r_loop == u_loop || r_loop->is_member(u_loop)) {
          used_inside_loop = true;
        }
      }
    }
  }//for
  Node* bol = iff->in(1);
  assert(!bol->is_OpaqueInitializedAssertionPredicate(), "Initialized Assertion Predicates cannot form a diamond with Halt");
  if (bol->is_OpaqueTemplateAssertionPredicate()) {
    // Ignore Template Assertion Predicates with OpaqueTemplateAssertionPredicate nodes.
    return nullptr;
  }
  if (bol->is_OpaqueMultiversioning()) {
    assert(bol->as_OpaqueMultiversioning()->is_useless(), "Must be useless, i.e. fast main loop has already disappeared.");
    // Ignore a multiversion_if that just lost its loops. The OpaqueMultiversioning is marked useless,
    // and will make the multiversion_if constant fold in the next IGVN round.
    return nullptr;
  }
  if (!bol->is_Bool()) {
    assert(false, "Expected Bool, but got %s", NodeClassNames[bol->Opcode()]);
    return nullptr;
  }
  int cmp_op = bol->in(1)->Opcode();
  if (cmp_op == Op_SubTypeCheck) { // SubTypeCheck expansion expects an IfNode
    return nullptr;
  }
  // It is expensive to generate flags from a float compare.
  // Avoid duplicated float compare.
  if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return nullptr;

  float infrequent_prob = PROB_UNLIKELY_MAG(3);
  // Ignore cost and blocks frequency if CMOVE can be moved outside the loop.
  if (used_inside_loop) {
    if (cost >= ConditionalMoveLimit) return nullptr; // Too much goo

    // BlockLayoutByFrequency optimization moves infrequent branch
    // from hot path. No point in CMOV'ing in such case (110 is used
    // instead of 100 to account for the inexactness of the float value).
    if (BlockLayoutByFrequency) {
      infrequent_prob = MAX2(infrequent_prob, (float)BlockLayoutMinDiamondPercentage/110.0f);
    }
  }
  // Check for highly predictable branch.  No point in CMOV'ing if
  // we are going to predict accurately all the time.
  if (C->use_cmove() && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) {
    //keep going
  } else if (iff->_prob < infrequent_prob ||
             iff->_prob > (1.0f - infrequent_prob))
    return nullptr;

  // --------------
  // Now replace all Phis with CMOV's
  Node *cmov_ctrl = iff->in(0);
  uint flip = (lp->Opcode() == Op_IfTrue);
  Node_List wq;
  while (1) {
    PhiNode* phi = nullptr;
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      Node *out = region->fast_out(i);
      if (out->is_Phi()) {
        phi = out->as_Phi();
        break;
      }
    }
    if (phi == nullptr || _igvn.type(phi) == Type::TOP || !CMoveNode::supported(_igvn.type(phi))) {
      break;
    }
    // Move speculative ops
    wq.push(phi);
    while (wq.size() > 0) {
      Node *n = wq.pop();
      for (uint j = 1; j < n->req(); j++) {
        Node* m = n->in(j);
        if (m != nullptr && !is_dominator(get_ctrl(m), cmov_ctrl)) {
          set_ctrl(m, cmov_ctrl);
          wq.push(m);
        }
      }
    }
    Node* cmov = CMoveNode::make(iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi));
    register_new_node(cmov, cmov_ctrl);
    _igvn.replace_node(phi, cmov);
#ifndef PRODUCT
    if (TraceLoopOpts) {
      tty->print("CMOV ");
      r_loop->dump_head();
      if (Verbose) {
        bol->in(1)->dump(1);
        cmov->dump(1);
      }
    }
    DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
#endif
  }

  // The useless CFG diamond will fold up later; see the optimization in
  // RegionNode::Ideal.
  _igvn._worklist.push(region);

  return iff->in(1);
}

static void enqueue_cfg_uses(Node* m, Unique_Node_List& wq) {
  for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
    Node* u = m->fast_out(i);
    if (u->is_CFG()) {
      if (u->is_NeverBranch()) {
        u = u->as_NeverBranch()->proj_out(0);
        enqueue_cfg_uses(u, wq);
      } else {
        wq.push(u);
      }
    }
  }
}

// Try moving a store out of a loop, right before the loop
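// For example (illustrative sketch), with loop-invariant 'a', 'j' and 'v':
//   for (int i = 0; i < n; i++) { a[j] = v; ... }
// The store writes the same value to the same location on every iteration,
// so if it is the first memory operation in the body, nothing observes the
// memory Phi, and no exit precedes it, it can be executed once before the
// loop instead.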
Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
  // Store has to be first in the loop body
  IdealLoopTree *n_loop = get_loop(n_ctrl);
  if (n->is_Store() && n_loop != _ltree_root &&
      n_loop->is_loop() && n_loop->_head->is_Loop() &&
      n->in(0) != nullptr) {
    Node* address = n->in(MemNode::Address);
    Node* value = n->in(MemNode::ValueIn);
    Node* mem = n->in(MemNode::Memory);
    IdealLoopTree* address_loop = get_loop(get_ctrl(address));
    IdealLoopTree* value_loop = get_loop(get_ctrl(value));

    // - address and value must be loop invariant
    // - memory must be a memory Phi for the loop
    // - Store must be the only store on this memory slice in the
    //   loop: if there's another store following this one then value
    //   written at iteration i by the second store could be overwritten
    //   at iteration i+n by the first store: it's not safe to move the
    //   first store out of the loop
    // - nothing must observe the memory Phi: it guarantees no read
    //   before the store, we are also guaranteed the store post
    //   dominates the loop head (ignoring a possible early
    //   exit). Otherwise there would be extra Phi involved between the
    //   loop's Phi and the store.
    // - there must be no early exit from the loop before the Store
    //   (such an exit most of the time would be an extra use of the
    //   memory Phi but sometimes is a bottom memory Phi that takes the
    //   store as input).

    if (!n_loop->is_member(address_loop) &&
        !n_loop->is_member(value_loop) &&
        mem->is_Phi() && mem->in(0) == n_loop->_head &&
        mem->outcnt() == 1 &&
        mem->in(LoopNode::LoopBackControl) == n) {

      assert(n_loop->_tail != nullptr, "need a tail");
      assert(is_dominator(n_ctrl, n_loop->_tail), "store control must not be in a branch in the loop");

      // Verify that there's no early exit of the loop before the store.
      bool ctrl_ok = false;
      {
        // Follow control from loop head until n, we exit the loop or
        // we reach the tail
        ResourceMark rm;
        Unique_Node_List wq;
        wq.push(n_loop->_head);

        for (uint next = 0; next < wq.size(); ++next) {
          Node *m = wq.at(next);
          if (m == n->in(0)) {
            ctrl_ok = true;
            continue;
          }
          assert(!has_ctrl(m), "should be CFG");
          if (!n_loop->is_member(get_loop(m)) || m == n_loop->_tail) {
            ctrl_ok = false;
            break;
          }
          enqueue_cfg_uses(m, wq);
          if (wq.size() > 10) {
            ctrl_ok = false;
            break;
          }
        }
      }
      if (ctrl_ok) {
        // move the Store
        _igvn.replace_input_of(mem, LoopNode::LoopBackControl, mem);
        _igvn.replace_input_of(n, 0, n_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl));
        _igvn.replace_input_of(n, MemNode::Memory, mem->in(LoopNode::EntryControl));
        // Disconnect the phi now. An empty phi can confuse other
        // optimizations in this pass of loop opts.
        _igvn.replace_node(mem, mem->in(LoopNode::EntryControl));
        n_loop->_body.yank(mem);

        set_ctrl_and_loop(n, n->in(0));

        return n;
      }
    }
  }
  return nullptr;
}

// Try moving a store out of a loop, right after the loop
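// For example (illustrative sketch), with loop-invariant 'a' and 'j':
//   for (int i = 0; i < n; i++) { ...; a[j] = f(i); }
// Only the value stored on the last iteration is observable after the loop,
// so if nothing inside the loop reads the stored value, the store can be
// sunk after the loop exit.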
void PhaseIdealLoop::try_move_store_after_loop(Node* n) {
  if (n->is_Store() && n->in(0) != nullptr) {
    Node *n_ctrl = get_ctrl(n);
    IdealLoopTree *n_loop = get_loop(n_ctrl);
    // Store must be in a loop
    if (n_loop != _ltree_root && !n_loop->_irreducible) {
      Node* address = n->in(MemNode::Address);
      Node* value = n->in(MemNode::ValueIn);
      IdealLoopTree* address_loop = get_loop(get_ctrl(address));
      // address must be loop invariant
      if (!n_loop->is_member(address_loop)) {
        // Store must be last on this memory slice in the loop and
        // nothing in the loop must observe it
        Node* phi = nullptr;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node* u = n->fast_out(i);
          if (has_ctrl(u)) { // control use?
            IdealLoopTree *u_loop = get_loop(get_ctrl(u));
            if (!n_loop->is_member(u_loop)) {
              continue;
            }
            if (u->is_Phi() && u->in(0) == n_loop->_head) {
              assert(_igvn.type(u) == Type::MEMORY, "bad phi");
              // multiple phis on the same slice are possible
              if (phi != nullptr) {
                return;
              }
              phi = u;
              continue;
            }
          }
          return;
        }
        if (phi != nullptr) {
          // Nothing in the loop before the store (next iteration)
          // must observe the stored value
          bool mem_ok = true;
          {
            ResourceMark rm;
            Unique_Node_List wq;
            wq.push(phi);
            for (uint next = 0; next < wq.size() && mem_ok; ++next) {
              Node *m = wq.at(next);
              for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax && mem_ok; i++) {
                Node* u = m->fast_out(i);
                if (u->is_Store() || u->is_Phi()) {
                  if (u != n) {
                    wq.push(u);
                    mem_ok = (wq.size() <= 10);
                  }
                } else {
                  mem_ok = false;
                  break;
                }
              }
            }
          }
          if (mem_ok) {
            // Move the store out of the loop if the LCA of all
            // users (except for the phi) is outside the loop.
            Node* hook = new Node(1);
            hook->init_req(0, n_ctrl); // Add an input to prevent hook from being dead
            _igvn.rehash_node_delayed(phi);
            int count = phi->replace_edge(n, hook, &_igvn);
            assert(count > 0, "inconsistent phi");

            // Compute latest point this store can go
            Node* lca = get_late_ctrl(n, get_ctrl(n));
            if (lca->is_OuterStripMinedLoop()) {
              lca = lca->in(LoopNode::EntryControl);
            }
            if (n_loop->is_member(get_loop(lca))) {
              // LCA is in the loop - bail out
              _igvn.replace_node(hook, n);
              return;
            }
#ifdef ASSERT
            if (n_loop->_head->is_Loop() && n_loop->_head->as_Loop()->is_strip_mined()) {
              assert(n_loop->_head->Opcode() == Op_CountedLoop, "outer loop is a strip mined");
              n_loop->_head->as_Loop()->verify_strip_mined(1);
              Node* outer = n_loop->_head->as_CountedLoop()->outer_loop();
              IdealLoopTree* outer_loop = get_loop(outer);
              assert(n_loop->_parent == outer_loop, "broken loop tree");
              assert(get_loop(lca) == outer_loop, "safepoint in outer loop consumes all memory state");
            }
#endif
            lca = place_outside_loop(lca, n_loop);
            assert(!n_loop->is_member(get_loop(lca)), "control must not be back in the loop");
            assert(get_loop(lca)->_nest < n_loop->_nest || get_loop(lca)->_head->as_Loop()->is_in_infinite_subgraph(), "must not be moved into inner loop");

            // Move store out of the loop
            _igvn.replace_node(hook, n->in(MemNode::Memory));
            _igvn.replace_input_of(n, 0, lca);
            set_ctrl_and_loop(n, lca);

            // Disconnect the phi now. An empty phi can confuse other
            // optimizations in this pass of loop opts.
            if (phi->in(LoopNode::LoopBackControl) == phi) {
              _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
              n_loop->_body.yank(phi);
            }
          }
        }
      }
    }
  }
}

// We can't use immutable memory for the flat array check because we are loading the mark word which is
// mutable. Although the bits we are interested in are immutable (we check for markWord::unlocked_value),
// we need to use raw memory to not break anti-dependency analysis. The code below still attempts to move
// flat array checks out of loops, mainly to enable loop unswitching.
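// For example (illustrative sketch), with a loop-invariant array 'a' that may
// be a flat value-class array:
//   for (int i = 0; i < n; i++) { sum += a[i]; }
// The FlatArrayCheck on 'a' is invariant except for its raw memory input.
// Rewiring that input to a memory state from before the loop lets the check
// and its Bool float out, so loop unswitching can version the loop on it.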
void PhaseIdealLoop::move_flat_array_check_out_of_loop(Node* n) {
  // Skip checks for more than one array
  if (n->req() > 3) {
    return;
  }
  Node* mem = n->in(FlatArrayCheckNode::Memory);
  Node* array = n->in(FlatArrayCheckNode::ArrayOrKlass)->uncast();
  IdealLoopTree* check_loop = get_loop(get_ctrl(n));
  IdealLoopTree* ary_loop = get_loop(get_ctrl(array));

  // Check if array is loop invariant
  if (!check_loop->is_member(ary_loop)) {
    // Walk up memory graph from the check until we leave the loop
    VectorSet wq;
    wq.set(mem->_idx);
    while (check_loop->is_member(get_loop(ctrl_or_self(mem)))) {
      if (mem->is_Phi()) {
        mem = mem->in(1);
      } else if (mem->is_MergeMem()) {
        mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
      } else if (mem->is_Proj()) {
        mem = mem->in(0);
      } else if (mem->is_MemBar() || mem->is_SafePoint()) {
        mem = mem->in(TypeFunc::Memory);
      } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
        mem = mem->in(MemNode::Memory);
      } else {
#ifdef ASSERT
        mem->dump();
#endif
        ShouldNotReachHere();
      }
      if (wq.test_set(mem->_idx)) {
        return;
      }
    }
    // Replace memory input and re-compute ctrl to move the check out of the loop
    _igvn.replace_input_of(n, 1, mem);
    set_ctrl_and_loop(n, get_early_ctrl(n));
    Node* bol = n->unique_out();
    set_ctrl_and_loop(bol, get_early_ctrl(bol));
  }
}

// Splitting some nodes that take a counted loop phi as input through that
// phi at the counted loop head can cause vectorization of some expressions
// to fail
bool PhaseIdealLoop::split_thru_phi_could_prevent_vectorization(Node* n, Node* n_blk) {
  if (!n_blk->is_CountedLoop()) {
    return false;
  }

  int opcode = n->Opcode();

  if (opcode != Op_AndI &&
      opcode != Op_MulI &&
      opcode != Op_RotateRight &&
      opcode != Op_RShiftI) {
    return false;
  }

  return n->in(1) == n_blk->as_BaseCountedLoop()->phi();
}

//------------------------------split_if_with_blocks_pre-----------------------
// Do the real work in a non-recursive function.  Data nodes want to be
// cloned in the pre-order so they can feed each other nicely.
Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
  // Cloning these guys is unlikely to win
  int n_op = n->Opcode();
  if (n_op == Op_MergeMem) {
    return n;
  }
  if (n->is_Proj()) {
    return n;
  }

  if (n->isa_FlatArrayCheck()) {
    move_flat_array_check_out_of_loop(n);
    return n;
  }

  // Do not clone-up CmpFXXX variations, as these are always
  // followed by a CmpI
  if (n->is_Cmp()) {
    return n;
  }
  // Attempt to use a conditional move instead of a phi/branch
  if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
    Node *cmov = conditional_move( n );
    if (cmov) {
      return cmov;
    }
  }
  if (n->is_CFG() || n->is_LoadStore()) {
    return n;
  }
  if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
    if (!C->major_progress()) { // If chance of no more loop opts...
      _igvn._worklist.push(n); // maybe we'll remove them
    }
    return n;
  }

  if (n->is_Con()) {
    return n;   // No cloning for Con nodes
  }

  Node *n_ctrl = get_ctrl(n);
  if (!n_ctrl) {
    return n;   // Dead node
  }

  Node* res = try_move_store_before_loop(n, n_ctrl);
  if (res != nullptr) {
    return n;
  }

  // Attempt to remix address expressions for loop invariants
  Node *m = remix_address_expressions( n );
  if( m ) return m;

  if (n_op == Op_AddI) {
    Node *nn = convert_add_to_muladd( n );
    if ( nn ) return nn;
  }

  if (n->is_ConstraintCast()) {
    Node* dom_cast = n->as_ConstraintCast()->dominating_cast(&_igvn, this);
    // ConstraintCastNode::dominating_cast() uses node control input to determine domination.
    // Node control inputs don't necessarily agree with loop control info (due to
    // transformations happening in between), thus an additional dominance check is needed
    // to keep loop info valid.
    if (dom_cast != nullptr && is_dominator(get_ctrl(dom_cast), get_ctrl(n))) {
      _igvn.replace_node(n, dom_cast);
      return dom_cast;
    }
  }

  // Determine if the Node has inputs from some local Phi.
  // Returns the block to clone thru.
  Node *n_blk = has_local_phi_input( n );
  if( !n_blk ) return n;

  // Do not clone the trip counter through on a CountedLoop
  // (messes up the canonical shape).
  if (((n_blk->is_CountedLoop() || (n_blk->is_Loop() && n_blk->as_Loop()->is_loop_nest_inner_loop())) && n->Opcode() == Op_AddI) ||
      (n_blk->is_LongCountedLoop() && n->Opcode() == Op_AddL)) {
    return n;
  }
  // Pushing a shift through the iv Phi can get in the way of addressing optimizations or range check elimination
  if (n_blk->is_BaseCountedLoop() && n->Opcode() == Op_LShift(n_blk->as_BaseCountedLoop()->bt()) &&
      n->in(1) == n_blk->as_BaseCountedLoop()->phi()) {
    return n;
  }

  if (split_thru_phi_could_prevent_vectorization(n, n_blk)) {
    return n;
  }

  // Check for having no control input; not pinned.  Allow
  // dominating control.
  if (n->in(0)) {
    Node *dom = idom(n_blk);
    if (dom_lca(n->in(0), dom) != n->in(0)) {
      return n;
    }
  }
  // Policy: when is it profitable.  You must get more wins than
  // policy before it is considered profitable.  Policy is usually 0,
  // so 1 win is considered profitable.  Big merges will require big
  // cloning, so get a larger policy.
  int policy = n_blk->req() >> 2;

  // If the loop is a candidate for range check elimination,
  // delay splitting through its phi until a later loop optimization
  if (n_blk->is_BaseCountedLoop()) {
    IdealLoopTree *lp = get_loop(n_blk);
    if (lp && lp->_rce_candidate) {
      return n;
    }
  }

  if (must_throttle_split_if()) return n;

  // Split 'n' through the merge point if it is profitable
  Node *phi = split_thru_phi( n, n_blk, policy );
  if (!phi) return n;

  // Found a Phi to split thru!
  // Replace 'n' with the new phi
  _igvn.replace_node( n, phi );
  // Moved a load around the loop, 'en-registering' something.
  if (n_blk->is_Loop() && n->is_Load() &&
      !phi->in(LoopNode::LoopBackControl)->is_Load())
    C->set_major_progress();

  return phi;
}

static bool merge_point_too_heavy(Compile* C, Node* region) {
  // Bail out if the region and its phis have too many users.
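  // Heuristic sketch: 'weight' approximates how many nodes a split-if would
  // have to clone (every use of the region's outputs), so require the
  // remaining node budget to be at least 8x that before allowing it.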
  int weight = 0;
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    weight += region->fast_out(i)->outcnt();
  }
  int nodes_left = C->max_node_limit() - C->live_nodes();
  if (weight * 8 > nodes_left) {
    if (PrintOpto) {
      tty->print_cr("*** Split-if bails out: %d nodes, region weight %d", C->unique(), weight);
    }
    return true;
  } else {
    return false;
  }
}

static bool merge_point_safe(Node* region) {
  // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
  // having a PhiNode input. This sidesteps the dangerous case where the split
  // ConvI2LNode may become TOP if the input Value() does not
  // overlap the ConvI2L range, leaving a node which may not dominate its
  // uses.
  // A better fix for this problem can be found in the BugTraq entry, but
  // expediency for Mantis demands this hack.
#ifdef _LP64
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    Node* n = region->fast_out(i);
    if (n->is_Phi()) {
      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
        Node* m = n->fast_out(j);
        if (m->Opcode() == Op_ConvI2L)
          return false;
        if (m->is_CastII()) {
          return false;
        }
      }
    }
  }
#endif
  return true;
}


//------------------------------place_outside_loop---------------------------------
// Place some computation outside of this loop on the path to the use passed as argument
Node* PhaseIdealLoop::place_outside_loop(Node* useblock, IdealLoopTree* loop) const {
  Node* head = loop->_head;
  assert(!loop->is_member(get_loop(useblock)), "must be outside loop");
  if (head->is_Loop() && head->as_Loop()->is_strip_mined()) {
    loop = loop->_parent;
    assert(loop->_head->is_OuterStripMinedLoop(), "malformed strip mined loop");
  }

  // Pick control right outside the loop
  for (;;) {
    Node* dom = idom(useblock);
    if (loop->is_member(get_loop(dom))) {
      break;
    }
    useblock = dom;
  }
  assert(find_non_split_ctrl(useblock) == useblock, "should be non split control");
  return useblock;
}


bool PhaseIdealLoop::identical_backtoback_ifs(Node *n) {
  if (!n->is_If() || n->is_BaseCountedLoopEnd()) {
    return false;
  }
  if (!n->in(0)->is_Region()) {
    return false;
  }

  Node* region = n->in(0);
  Node* dom = idom(region);
  if (!dom->is_If() || !n->as_If()->same_condition(dom, &_igvn)) {
    return false;
  }
  IfNode* dom_if = dom->as_If();
  Node* proj_true = dom_if->proj_out(1);
  Node* proj_false = dom_if->proj_out(0);

  for (uint i = 1; i < region->req(); i++) {
    if (is_dominator(proj_true, region->in(i))) {
      continue;
    }
    if (is_dominator(proj_false, region->in(i))) {
      continue;
    }
    return false;
  }

  return true;
}


bool PhaseIdealLoop::can_split_if(Node* n_ctrl) {
  if (must_throttle_split_if()) {
    return false;
  }

  // Do not do 'split-if' if irreducible loops are present.
  if (_has_irreducible_loops) {
    return false;
  }

  if (merge_point_too_heavy(C, n_ctrl)) {
    return false;
  }

  // Do not do 'split-if' if some paths are dead.  First do dead code
  // elimination and then see if it's still profitable.
  for (uint i = 1; i < n_ctrl->req(); i++) {
    if (n_ctrl->in(i) == C->top()) {
      return false;
    }
  }

  // If trying to do a 'Split-If' at the loop head, it is only
  // profitable if the cmp folds up on BOTH paths.  Otherwise we
  // risk peeling a loop forever.

  // CNC - Disabled for now.  Requires careful handling of loop
  // body selection for the cloned code.  Also, make sure we check
  // for any input path not being in the same loop as n_ctrl.  For
  // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
  // because the alternative loop entry points won't be converted
  // into LoopNodes.
  IdealLoopTree *n_loop = get_loop(n_ctrl);
  for (uint j = 1; j < n_ctrl->req(); j++) {
    if (get_loop(n_ctrl->in(j)) != n_loop) {
      return false;
    }
  }

  // Check for safety of the merge point.
  if (!merge_point_safe(n_ctrl)) {
    return false;
  }

  return true;
}

// Detect if the node is the loop exit test of an inner strip-mined loop
// Return: null if it's not the case, or the exit of the outer strip-mined loop
static Node* is_inner_of_stripmined_loop(const Node* out) {
  Node* out_le = nullptr;

  if (out->is_CountedLoopEnd()) {
    const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();

    if (loop != nullptr && loop->is_strip_mined()) {
      out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
    }
  }

  return out_le;
}

bool PhaseIdealLoop::flat_array_element_type_check(Node *n) {
  // If the CmpP is a subtype check for a value that has just been loaded
  // from an array, the subtype check guarantees that the value can't be
  // stored in a flat array, and the load of the value happens with a flat
  // array check, then push the type check through the phi of the flat
  // array check. This needs special logic because the subtype check's
  // input is not a phi but a LoadKlass that must first be cloned through
  // the phi.
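  // Shape handled here (illustrative sketch):
  //   CmpP(LoadKlass(AddP(obj_phi, ...)), klass_con)
  // where 'obj_phi' merges the values of the two flat array check paths.
  // The LoadKlass (and any DecodeNKlass/CastPP) is cloned per path and a new
  // Phi of the cloned klass loads replaces the CmpP input, after which the
  // regular split-if machinery (split_if_with_blocks_post) can optimize it.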
  if (n->Opcode() != Op_CmpP) {
    return false;
  }

  Node* klassptr = n->in(1);
  Node* klasscon = n->in(2);

  if (klassptr->is_DecodeNarrowPtr()) {
    klassptr = klassptr->in(1);
  }

  if (klassptr->Opcode() != Op_LoadKlass && klassptr->Opcode() != Op_LoadNKlass) {
    return false;
  }

  if (!klasscon->is_Con()) {
    return false;
  }

  Node* addr = klassptr->in(MemNode::Address);

  if (!addr->is_AddP()) {
    return false;
  }

  intptr_t offset;
  Node* obj = AddPNode::Ideal_base_and_offset(addr, &_igvn, offset);

  if (obj == nullptr) {
    return false;
  }

  assert(obj != nullptr && addr->in(AddPNode::Base) == addr->in(AddPNode::Address), "malformed AddP?");
  if (obj->Opcode() == Op_CastPP) {
    obj = obj->in(1);
  }

  if (!obj->is_Phi()) {
    return false;
  }

  Node* region = obj->in(0);

  Node* phi = PhiNode::make_blank(region, n->in(1));
  for (uint i = 1; i < region->req(); i++) {
    Node* in = obj->in(i);
    Node* ctrl = region->in(i);
    if (addr->in(AddPNode::Base) != obj) {
      Node* cast = addr->in(AddPNode::Base);
      assert(cast->Opcode() == Op_CastPP && cast->in(0) != nullptr, "inconsistent subgraph");
      Node* cast_clone = cast->clone();
      cast_clone->set_req(0, ctrl);
      cast_clone->set_req(1, in);
      register_new_node(cast_clone, ctrl);
      const Type* tcast = cast_clone->Value(&_igvn);
      _igvn.set_type(cast_clone, tcast);
      cast_clone->as_Type()->set_type(tcast);
      in = cast_clone;
    }
    Node* addr_clone = addr->clone();
    addr_clone->set_req(AddPNode::Base, in);
    addr_clone->set_req(AddPNode::Address, in);
    register_new_node(addr_clone, ctrl);
    _igvn.set_type(addr_clone, addr_clone->Value(&_igvn));
    Node* klassptr_clone = klassptr->clone();
    klassptr_clone->set_req(2, addr_clone);
    register_new_node(klassptr_clone, ctrl);
    _igvn.set_type(klassptr_clone, klassptr_clone->Value(&_igvn));
    if (klassptr != n->in(1)) {
      Node* decode = n->in(1);
      assert(decode->is_DecodeNarrowPtr(), "inconsistent subgraph");
      Node* decode_clone = decode->clone();
      decode_clone->set_req(1, klassptr_clone);
      register_new_node(decode_clone, ctrl);
      _igvn.set_type(decode_clone, decode_clone->Value(&_igvn));
      klassptr_clone = decode_clone;
    }
    phi->set_req(i, klassptr_clone);
  }
  register_new_node(phi, region);
  Node* orig = n->in(1);
  _igvn.replace_input_of(n, 1, phi);
  split_if_with_blocks_post(n);
  if (n->outcnt() != 0) {
    _igvn.replace_input_of(n, 1, orig);
    _igvn.remove_dead_node(phi);
  }
  return true;
}

//------------------------------split_if_with_blocks_post----------------------
// Do the real work in a non-recursive function.  CFG hackery wants to be
// in the post-order, so it can dirty the I-DOM info and not use the dirtied
// info.
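// For example (illustrative sketch), the classic split-if: a compare of a
// Phi at a region
//   if (Phi(R, a, b) == k) ...
// is cloned into each predecessor of R as 'a == k' and 'b == k', so each
// incoming path branches directly and the merge point disappears.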
1581 Node *n_blk = has_local_phi_input(n);
1582 if (n_blk != n_ctrl) {
1583 return;
1584 }
1585 
1586 if (!can_split_if(n_ctrl)) {
1587 return;
1588 }
1589 
1590 if (n->outcnt() != 1) {
1591 return; // Multiple Bools from 1 compare?
1592 }
1593 Node *bol = n->unique_out();
1594 assert(bol->is_Bool(), "expect a bool here");
1595 if (bol->outcnt() != 1) {
1596 return; // Multiple branches from 1 compare?
1597 }
1598 Node *iff = bol->unique_out();
1599 
1600 // Check some safety conditions
1601 if (iff->is_If()) { // Classic split-if?
1602 if (iff->in(0) != n_ctrl) {
1603 return; // Compare must be in same blk as if
1604 }
1605 } else if (iff->is_CMove()) { // Trying to split-up a CMOVE
1606 // Can't split CMove with different control.
1607 if (get_ctrl(iff) != n_ctrl) {
1608 return;
1609 }
1610 if (get_ctrl(iff->in(2)) == n_ctrl ||
1611 get_ctrl(iff->in(3)) == n_ctrl) {
1612 return; // Inputs not yet split-up
1613 }
1614 if (get_loop(n_ctrl) != get_loop(get_ctrl(iff))) {
1615 return; // Loop-invar test gates loop-varying CMOVE
1616 }
1617 } else {
1618 return; // some other kind of node, such as an Allocate
1619 }
1620 
1621 // When is split-if profitable? Every 'win' means some control flow
1622 // goes dead, so it's almost always a win.
1623 int policy = 0;
1624 // Split compare 'n' through the merge point if it is profitable
1625 Node *phi = split_thru_phi( n, n_ctrl, policy);
1626 if (!phi) {
1627 return;
1628 }
1629 
1630 // Found a Phi to split thru!
1631 // Replace 'n' with the new phi
1632 _igvn.replace_node(n, phi);
1633 
1634 // Now split the bool up thru the phi
1635 Node *bolphi = split_thru_phi(bol, n_ctrl, -1);
1636 guarantee(bolphi != nullptr, "null boolean phi node");
1637 
1638 _igvn.replace_node(bol, bolphi);
1639 assert(iff->in(1) == bolphi, "");
1640 
1641 if (bolphi->Value(&_igvn)->singleton()) {
1642 return;
1643 }
1644 
1645 // Conditional-move? Must split up now
1646 if (!iff->is_If()) {
1647 Node *cmovphi = split_thru_phi(iff, n_ctrl, -1);
1648 _igvn.replace_node(iff, cmovphi);
1649 return;
1650 }
1651 
1652 // Now split the IF
1653 C->print_method(PHASE_BEFORE_SPLIT_IF, 4, iff);
1654 if (TraceLoopOpts) {
1655 tty->print_cr("Split-If");
1656 }
1657 do_split_if(iff);
1658 C->print_method(PHASE_AFTER_SPLIT_IF, 4, iff);
1659 return;
1660 }
1661 
1662 // Two identical ifs back to back can be merged
1663 if (try_merge_identical_ifs(n)) {
1664 return;
1665 }
1666 
1667 // Check for an IF ready to split; one that has its
1668 // condition codes input coming from a Phi at the block start.
1669 int n_op = n->Opcode();
1670 
1671 // Check for an IF being dominated by another IF with the same test
1672 if (n_op == Op_If ||
1673 n_op == Op_RangeCheck) {
1674 Node *bol = n->in(1);
1675 uint max = bol->outcnt();
1676 // Check for same test used more than once?
1677 if (bol->is_Bool() && (max > 1 || bol->in(1)->is_SubTypeCheck())) {
1678 // Search up IDOMs to see if this IF is dominated.
1679 Node* cmp = bol->in(1);
1680 Node *cutoff = cmp->is_SubTypeCheck() ?
dom_lca(get_ctrl(cmp->in(1)), get_ctrl(cmp->in(2))) : get_ctrl(bol);
1681 
1682 // Now search up IDOMs till cutoff, looking for a dominating test
1683 Node *prevdom = n;
1684 Node *dom = idom(prevdom);
1685 while (dom != cutoff) {
1686 if (dom->req() > 1 && n->as_If()->same_condition(dom, &_igvn) && prevdom->in(0) == dom &&
1687 safe_for_if_replacement(dom)) {
1688 // It's invalid to move control-dependent data nodes into the inner
1689 // strip-mined loop because that would:
1690 // 1) break the validation in LoopNode::verify_strip_mined(), and
1691 // 2) move code with side effects into the strip-mined loop.
1692 // Move to the exit of the outer strip-mined loop in that case.
1693 Node* out_le = is_inner_of_stripmined_loop(dom);
1694 if (out_le != nullptr) {
1695 prevdom = out_le;
1696 }
1697 // Replace the dominated test with an obvious true or false.
1698 // Place it on the IGVN worklist for later cleanup.
1699 C->set_major_progress();
1700 // Split if: pin array accesses that are control dependent on a range check and moved to a regular if,
1701 // to prevent an array load from floating above its range check. There are three cases:
1702 // 1. Move from RangeCheck "a" to RangeCheck "b": don't need to pin. If we ever remove b, then we pin
1703 // all its array accesses at that point.
1704 // 2. We move from RangeCheck "a" to regular if "b": need to pin. If we ever remove b, then its array
1705 // accesses would start to float, since we don't pin at that point.
1706 // 3. If we move from regular if: don't pin. All array accesses are already assumed to be pinned.
1707 bool pin_array_access_nodes = n->Opcode() == Op_RangeCheck &&
1708 prevdom->in(0)->Opcode() != Op_RangeCheck;
1709 dominated_by(prevdom->as_IfProj(), n->as_If(), false, pin_array_access_nodes);
1710 DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1711 return;
1712 }
1713 prevdom = dom;
1714 dom = idom(prevdom);
1715 }
1716 }
1717 }
1718 
1719 try_sink_out_of_loop(n);
1720 if (C->failing()) {
1721 return;
1722 }
1723 
1724 try_move_store_after_loop(n);
1725 
1726 // Remove multiple allocations of the same inline type
1727 if (n->is_InlineType()) {
1728 n->as_InlineType()->remove_redundant_allocations(this);
1729 }
1730 }
1731 
1732 // Transform:
1733 //
1734 // if (some_condition) {
1735 // // body 1
1736 // } else {
1737 // // body 2
1738 // }
1739 // if (some_condition) {
1740 // // body 3
1741 // } else {
1742 // // body 4
1743 // }
1744 //
1745 // into:
1746 //
1747 //
1748 // if (some_condition) {
1749 // // body 1
1750 // // body 3
1751 // } else {
1752 // // body 2
1753 // // body 4
1754 // }
1755 bool PhaseIdealLoop::try_merge_identical_ifs(Node* n) {
1756 if (identical_backtoback_ifs(n) && can_split_if(n->in(0))) {
1757 Node *n_ctrl = n->in(0);
1758 IfNode* dom_if = idom(n_ctrl)->as_If();
1759 if (n->in(1) != dom_if->in(1)) {
1760 assert(n->in(1)->in(1)->is_SubTypeCheck() &&
1761 (n->in(1)->in(1)->as_SubTypeCheck()->method() != nullptr ||
1762 dom_if->in(1)->in(1)->as_SubTypeCheck()->method() != nullptr), "only for subtype checks with profile data attached");
1763 _igvn.replace_input_of(n, 1, dom_if->in(1));
1764 }
1765 ProjNode* dom_proj_true = dom_if->proj_out(1);
1766 ProjNode* dom_proj_false = dom_if->proj_out(0);
1767 
1768 // Now split the IF
1769 RegionNode* new_false_region;
1770 RegionNode* new_true_region;
1771 do_split_if(n, &new_false_region, &new_true_region);
1772 assert(new_false_region->req() == new_true_region->req(), "");
1773 #ifdef ASSERT
1774 for (uint i = 1; i < new_false_region->req(); ++i) {
1775 
assert(new_false_region->in(i)->in(0) == new_true_region->in(i)->in(0), "unexpected shape following split if"); 1776 assert(i == new_false_region->req() - 1 || new_false_region->in(i)->in(0)->in(1) == new_false_region->in(i + 1)->in(0)->in(1), "unexpected shape following split if"); 1777 } 1778 #endif 1779 assert(new_false_region->in(1)->in(0)->in(1) == dom_if->in(1), "dominating if and dominated if after split must share test"); 1780 1781 // We now have: 1782 // if (some_condition) { 1783 // // body 1 1784 // if (some_condition) { 1785 // body3: // new_true_region 1786 // // body3 1787 // } else { 1788 // goto body4; 1789 // } 1790 // } else { 1791 // // body 2 1792 // if (some_condition) { 1793 // goto body3; 1794 // } else { 1795 // body4: // new_false_region 1796 // // body4; 1797 // } 1798 // } 1799 // 1800 1801 // clone pinned nodes thru the resulting regions 1802 push_pinned_nodes_thru_region(dom_if, new_true_region); 1803 push_pinned_nodes_thru_region(dom_if, new_false_region); 1804 1805 // Optimize out the cloned ifs. Because pinned nodes were cloned, this also allows a CastPP that would be dependent 1806 // on a projection of n to have the dom_if as a control dependency. We don't want the CastPP to end up with an 1807 // unrelated control dependency. 1808 for (uint i = 1; i < new_false_region->req(); i++) { 1809 if (is_dominator(dom_proj_true, new_false_region->in(i))) { 1810 dominated_by(dom_proj_true->as_IfProj(), new_false_region->in(i)->in(0)->as_If()); 1811 } else { 1812 assert(is_dominator(dom_proj_false, new_false_region->in(i)), "bad if"); 1813 dominated_by(dom_proj_false->as_IfProj(), new_false_region->in(i)->in(0)->as_If()); 1814 } 1815 } 1816 return true; 1817 } 1818 return false; 1819 } 1820 1821 void PhaseIdealLoop::push_pinned_nodes_thru_region(IfNode* dom_if, Node* region) { 1822 for (DUIterator i = region->outs(); region->has_out(i); i++) { 1823 Node* u = region->out(i); 1824 if (!has_ctrl(u) || u->is_Phi() || !u->depends_only_on_test() || !_igvn.no_dependent_zero_check(u)) { 1825 continue; 1826 } 1827 assert(u->in(0) == region, "not a control dependent node?"); 1828 uint j = 1; 1829 for (; j < u->req(); ++j) { 1830 Node* in = u->in(j); 1831 if (!is_dominator(ctrl_or_self(in), dom_if)) { 1832 break; 1833 } 1834 } 1835 if (j == u->req()) { 1836 Node *phi = PhiNode::make_blank(region, u); 1837 for (uint k = 1; k < region->req(); ++k) { 1838 Node* clone = u->clone(); 1839 clone->set_req(0, region->in(k)); 1840 register_new_node(clone, region->in(k)); 1841 phi->init_req(k, clone); 1842 } 1843 register_new_node(phi, region); 1844 _igvn.replace_node(u, phi); 1845 --i; 1846 } 1847 } 1848 } 1849 1850 bool PhaseIdealLoop::safe_for_if_replacement(const Node* dom) const { 1851 if (!dom->is_CountedLoopEnd()) { 1852 return true; 1853 } 1854 CountedLoopEndNode* le = dom->as_CountedLoopEnd(); 1855 CountedLoopNode* cl = le->loopnode(); 1856 if (cl == nullptr) { 1857 return true; 1858 } 1859 if (!cl->is_main_loop()) { 1860 return true; 1861 } 1862 if (cl->is_canonical_loop_entry() == nullptr) { 1863 return true; 1864 } 1865 // Further unrolling is possible so loop exit condition might change 1866 return false; 1867 } 1868 1869 // See if a shared loop-varying computation has no loop-varying uses. 1870 // Happens if something is only used for JVM state in uncommon trap exits, 1871 // like various versions of induction variable+offset. Clone the 1872 // computation per usage to allow it to sink out of the loop. 
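// A sketch of the typical shape (illustrative only):
//
//   for (...) {
//     int v = i * scale + offset;  // loop varying, but...
//     if (rare_condition) {
//       uncommon_trap();           // ...only kept alive by the trap's JVM state
//     }
//   }
//
// 'v' has no loop-varying use, so a private clone of it per use can sink out
// of the loop instead of being computed on every iteration.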
1873 void PhaseIdealLoop::try_sink_out_of_loop(Node* n) { 1874 if (has_ctrl(n) && 1875 !n->is_Phi() && 1876 !n->is_Bool() && 1877 !n->is_Proj() && 1878 !n->is_MergeMem() && 1879 !n->is_CMove() && 1880 !n->is_OpaqueNotNull() && 1881 !n->is_OpaqueInitializedAssertionPredicate() && 1882 !n->is_OpaqueTemplateAssertionPredicate() && 1883 !n->is_Type()) { 1884 Node *n_ctrl = get_ctrl(n); 1885 IdealLoopTree *n_loop = get_loop(n_ctrl); 1886 1887 if (n->in(0) != nullptr) { 1888 IdealLoopTree* loop_ctrl = get_loop(n->in(0)); 1889 if (n_loop != loop_ctrl && n_loop->is_member(loop_ctrl)) { 1890 // n has a control input inside a loop but get_ctrl() is member of an outer loop. This could happen, for example, 1891 // for Div nodes inside a loop (control input inside loop) without a use except for an UCT (outside the loop). 1892 // Rewire control of n to right outside of the loop, regardless if its input(s) are later sunk or not. 1893 Node* maybe_pinned_n = n; 1894 Node* outside_ctrl = place_outside_loop(n_ctrl, loop_ctrl); 1895 if (n->depends_only_on_test()) { 1896 Node* pinned_clone = n->pin_array_access_node(); 1897 if (pinned_clone != nullptr) { 1898 // Pin array access nodes: if this is an array load, it's going to be dependent on a condition that's not a 1899 // range check for that access. If that condition is replaced by an identical dominating one, then an 1900 // unpinned load would risk floating above its range check. 1901 register_new_node(pinned_clone, n_ctrl); 1902 maybe_pinned_n = pinned_clone; 1903 _igvn.replace_node(n, pinned_clone); 1904 } 1905 } 1906 _igvn.replace_input_of(maybe_pinned_n, 0, outside_ctrl); 1907 } 1908 } 1909 if (n_loop != _ltree_root && n->outcnt() > 1) { 1910 // Compute early control: needed for anti-dependence analysis. It's also possible that as a result of 1911 // previous transformations in this loop opts round, the node can be hoisted now: early control will tell us. 1912 Node* early_ctrl = compute_early_ctrl(n, n_ctrl); 1913 if (n_loop->is_member(get_loop(early_ctrl)) && // check that this one can't be hoisted now 1914 ctrl_of_all_uses_out_of_loop(n, early_ctrl, n_loop)) { // All uses in outer loops! 1915 if (n->is_Store() || n->is_LoadStore()) { 1916 assert(false, "no node with a side effect"); 1917 C->record_failure("no node with a side effect"); 1918 return; 1919 } 1920 Node* outer_loop_clone = nullptr; 1921 for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin;) { 1922 Node* u = n->last_out(j); // Clone private computation per use 1923 _igvn.rehash_node_delayed(u); 1924 Node* x = nullptr; 1925 if (n->depends_only_on_test()) { 1926 // Pin array access nodes: if this is an array load, it's going to be dependent on a condition that's not a 1927 // range check for that access. If that condition is replaced by an identical dominating one, then an 1928 // unpinned load would risk floating above its range check. 1929 x = n->pin_array_access_node(); 1930 } 1931 if (x == nullptr) { 1932 x = n->clone(); 1933 } 1934 Node* x_ctrl = nullptr; 1935 if (u->is_Phi()) { 1936 // Replace all uses of normal nodes. Replace Phi uses 1937 // individually, so the separate Nodes can sink down 1938 // different paths. 1939 uint k = 1; 1940 while (u->in(k) != n) k++; 1941 u->set_req(k, x); 1942 // x goes next to Phi input path 1943 x_ctrl = u->in(0)->in(k); 1944 // Find control for 'x' next to use but not inside inner loops. 
1945 x_ctrl = place_outside_loop(x_ctrl, n_loop); 1946 --j; 1947 } else { // Normal use 1948 if (has_ctrl(u)) { 1949 x_ctrl = get_ctrl(u); 1950 } else { 1951 x_ctrl = u->in(0); 1952 } 1953 // Find control for 'x' next to use but not inside inner loops. 1954 x_ctrl = place_outside_loop(x_ctrl, n_loop); 1955 // Replace all uses 1956 if (u->is_ConstraintCast() && _igvn.type(n)->higher_equal(u->bottom_type()) && u->in(0) == x_ctrl) { 1957 // If we're sinking a chain of data nodes, we might have inserted a cast to pin the use which is not necessary 1958 // anymore now that we're going to pin n as well 1959 _igvn.replace_node(u, x); 1960 --j; 1961 } else { 1962 int nb = u->replace_edge(n, x, &_igvn); 1963 j -= nb; 1964 } 1965 } 1966 1967 if (n->is_Load()) { 1968 // For loads, add a control edge to a CFG node outside of the loop 1969 // to force them to not combine and return back inside the loop 1970 // during GVN optimization (4641526). 1971 assert(x_ctrl == get_late_ctrl_with_anti_dep(x->as_Load(), early_ctrl, x_ctrl), "anti-dependences were already checked"); 1972 1973 IdealLoopTree* x_loop = get_loop(x_ctrl); 1974 Node* x_head = x_loop->_head; 1975 if (x_head->is_Loop() && x_head->is_OuterStripMinedLoop()) { 1976 // Do not add duplicate LoadNodes to the outer strip mined loop 1977 if (outer_loop_clone != nullptr) { 1978 _igvn.replace_node(x, outer_loop_clone); 1979 continue; 1980 } 1981 outer_loop_clone = x; 1982 } 1983 x->set_req(0, x_ctrl); 1984 } else if (n->in(0) != nullptr){ 1985 x->set_req(0, x_ctrl); 1986 } 1987 assert(dom_depth(n_ctrl) <= dom_depth(x_ctrl), "n is later than its clone"); 1988 assert(!n_loop->is_member(get_loop(x_ctrl)), "should have moved out of loop"); 1989 register_new_node(x, x_ctrl); 1990 1991 // Chain of AddP nodes: (AddP base (AddP base (AddP base ))) 1992 // All AddP nodes must keep the same base after sinking so: 1993 // 1- We don't add a CastPP here until the last one of the chain is sunk: if part of the chain is not sunk, 1994 // their bases remain the same. 
1995 // (see 2- below) 1996 assert(!x->is_AddP() || !x->in(AddPNode::Address)->is_AddP() || 1997 x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base) || 1998 !x->in(AddPNode::Address)->in(AddPNode::Base)->eqv_uncast(x->in(AddPNode::Base)), "unexpected AddP shape"); 1999 if (x->in(0) == nullptr && !x->is_DecodeNarrowPtr() && 2000 !(x->is_AddP() && x->in(AddPNode::Address)->is_AddP() && x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base))) { 2001 assert(!x->is_Load(), "load should be pinned"); 2002 // Use a cast node to pin clone out of loop 2003 Node* cast = nullptr; 2004 for (uint k = 0; k < x->req(); k++) { 2005 Node* in = x->in(k); 2006 if (in != nullptr && n_loop->is_member(get_loop(get_ctrl(in)))) { 2007 const Type* in_t = _igvn.type(in); 2008 cast = ConstraintCastNode::make_cast_for_type(x_ctrl, in, in_t, 2009 ConstraintCastNode::UnconditionalDependency, nullptr); 2010 } 2011 if (cast != nullptr) { 2012 Node* prev = _igvn.hash_find_insert(cast); 2013 if (prev != nullptr && get_ctrl(prev) == x_ctrl) { 2014 cast->destruct(&_igvn); 2015 cast = prev; 2016 } else { 2017 register_new_node(cast, x_ctrl); 2018 } 2019 x->replace_edge(in, cast); 2020 // Chain of AddP nodes: 2021 // 2- A CastPP of the base is only added now that all AddP nodes are sunk 2022 if (x->is_AddP() && k == AddPNode::Base) { 2023 update_addp_chain_base(x, n->in(AddPNode::Base), cast); 2024 } 2025 break; 2026 } 2027 } 2028 assert(cast != nullptr, "must have added a cast to pin the node"); 2029 } 2030 } 2031 _igvn.remove_dead_node(n); 2032 } 2033 _dom_lca_tags_round = 0; 2034 } 2035 } 2036 } 2037 2038 void PhaseIdealLoop::update_addp_chain_base(Node* x, Node* old_base, Node* new_base) { 2039 ResourceMark rm; 2040 Node_List wq; 2041 wq.push(x); 2042 while (wq.size() != 0) { 2043 Node* n = wq.pop(); 2044 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2045 Node* u = n->fast_out(i); 2046 if (u->is_AddP() && u->in(AddPNode::Base) == old_base) { 2047 _igvn.replace_input_of(u, AddPNode::Base, new_base); 2048 wq.push(u); 2049 } 2050 } 2051 } 2052 } 2053 2054 // Compute the early control of a node by following its inputs until we reach 2055 // nodes that are pinned. Then compute the LCA of the control of all pinned nodes. 
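// Every pinned control found this way dominates n_ctrl, so all of them lie on
// a single dominator-tree chain: the early control is the deepest of them,
// i.e. the first point at which all of n's inputs are available.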
2056 Node* PhaseIdealLoop::compute_early_ctrl(Node* n, Node* n_ctrl) { 2057 Node* early_ctrl = nullptr; 2058 ResourceMark rm; 2059 Unique_Node_List wq; 2060 wq.push(n); 2061 for (uint i = 0; i < wq.size(); i++) { 2062 Node* m = wq.at(i); 2063 Node* c = nullptr; 2064 if (m->is_CFG()) { 2065 c = m; 2066 } else if (m->pinned()) { 2067 c = m->in(0); 2068 } else { 2069 for (uint j = 0; j < m->req(); j++) { 2070 Node* in = m->in(j); 2071 if (in != nullptr) { 2072 wq.push(in); 2073 } 2074 } 2075 } 2076 if (c != nullptr) { 2077 assert(is_dominator(c, n_ctrl), "control input must dominate current control"); 2078 if (early_ctrl == nullptr || is_dominator(early_ctrl, c)) { 2079 early_ctrl = c; 2080 } 2081 } 2082 } 2083 assert(is_dominator(early_ctrl, n_ctrl), "early control must dominate current control"); 2084 return early_ctrl; 2085 } 2086 2087 bool PhaseIdealLoop::ctrl_of_all_uses_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop) { 2088 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2089 Node* u = n->fast_out(i); 2090 if (u->is_Opaque1()) { 2091 return false; // Found loop limit, bugfix for 4677003 2092 } 2093 // We can't reuse tags in PhaseIdealLoop::dom_lca_for_get_late_ctrl_internal() so make sure calls to 2094 // get_late_ctrl_with_anti_dep() use their own tag 2095 _dom_lca_tags_round++; 2096 assert(_dom_lca_tags_round != 0, "shouldn't wrap around"); 2097 2098 if (u->is_Phi()) { 2099 for (uint j = 1; j < u->req(); ++j) { 2100 if (u->in(j) == n && !ctrl_of_use_out_of_loop(n, n_ctrl, n_loop, u->in(0)->in(j))) { 2101 return false; 2102 } 2103 } 2104 } else { 2105 Node* ctrl = has_ctrl(u) ? get_ctrl(u) : u->in(0); 2106 if (!ctrl_of_use_out_of_loop(n, n_ctrl, n_loop, ctrl)) { 2107 return false; 2108 } 2109 } 2110 } 2111 return true; 2112 } 2113 2114 bool PhaseIdealLoop::ctrl_of_use_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop, Node* ctrl) { 2115 if (n->is_Load()) { 2116 ctrl = get_late_ctrl_with_anti_dep(n->as_Load(), n_ctrl, ctrl); 2117 } 2118 IdealLoopTree *u_loop = get_loop(ctrl); 2119 if (u_loop == n_loop) { 2120 return false; // Found loop-varying use 2121 } 2122 if (n_loop->is_member(u_loop)) { 2123 return false; // Found use in inner loop 2124 } 2125 // Sinking a node from a pre loop to its main loop pins the node between the pre and main loops. If that node is input 2126 // to a check that's eliminated by range check elimination, it becomes input to an expression that feeds into the exit 2127 // test of the pre loop above the point in the graph where it's pinned. 2128 if (n_loop->_head->is_CountedLoop() && n_loop->_head->as_CountedLoop()->is_pre_loop()) { 2129 CountedLoopNode* pre_loop = n_loop->_head->as_CountedLoop(); 2130 if (is_dominator(pre_loop->loopexit(), ctrl)) { 2131 return false; 2132 } 2133 } 2134 return true; 2135 } 2136 2137 //------------------------------split_if_with_blocks--------------------------- 2138 // Check for aggressive application of 'split-if' optimization, 2139 // using basic block level info. 
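// The graph is walked once, depth-first from the root, using the explicit
// stack 'nstack' instead of recursion: split_if_with_blocks_pre() is applied
// to each node when it is first reached, and split_if_with_blocks_post() once
// all of its uses have been processed.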
2140 void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack) { 2141 Node* root = C->root(); 2142 visited.set(root->_idx); // first, mark root as visited 2143 // Do pre-visit work for root 2144 Node* n = split_if_with_blocks_pre(root); 2145 uint cnt = n->outcnt(); 2146 uint i = 0; 2147 2148 while (true) { 2149 // Visit all children 2150 if (i < cnt) { 2151 Node* use = n->raw_out(i); 2152 ++i; 2153 if (use->outcnt() != 0 && !visited.test_set(use->_idx)) { 2154 // Now do pre-visit work for this use 2155 use = split_if_with_blocks_pre(use); 2156 nstack.push(n, i); // Save parent and next use's index. 2157 n = use; // Process all children of current use. 2158 cnt = use->outcnt(); 2159 i = 0; 2160 } 2161 } 2162 else { 2163 // All of n's children have been processed, complete post-processing. 2164 if (cnt != 0 && !n->is_Con()) { 2165 assert(has_node(n), "no dead nodes"); 2166 split_if_with_blocks_post(n); 2167 if (C->failing()) { 2168 return; 2169 } 2170 } 2171 if (must_throttle_split_if()) { 2172 nstack.clear(); 2173 } 2174 if (nstack.is_empty()) { 2175 // Finished all nodes on stack. 2176 break; 2177 } 2178 // Get saved parent node and next use's index. Visit the rest of uses. 2179 n = nstack.node(); 2180 cnt = n->outcnt(); 2181 i = nstack.index(); 2182 nstack.pop(); 2183 } 2184 } 2185 } 2186 2187 2188 //============================================================================= 2189 // 2190 // C L O N E A L O O P B O D Y 2191 // 2192 2193 //------------------------------clone_iff-------------------------------------- 2194 // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps. 2195 // "Nearly" because all Nodes have been cloned from the original in the loop, 2196 // but the fall-in edges to the Cmp are different. Clone bool/Cmp pairs 2197 // through the Phi recursively, and return a Bool. 2198 Node* PhaseIdealLoop::clone_iff(PhiNode* phi) { 2199 2200 // Convert this Phi into a Phi merging Bools 2201 uint i; 2202 for (i = 1; i < phi->req(); i++) { 2203 Node* b = phi->in(i); 2204 if (b->is_Phi()) { 2205 _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi())); 2206 } else { 2207 assert(b->is_Bool() || b->is_OpaqueNotNull() || b->is_OpaqueInitializedAssertionPredicate(), 2208 "bool, non-null check with OpaqueNotNull or Initialized Assertion Predicate with its Opaque node"); 2209 } 2210 } 2211 Node* n = phi->in(1); 2212 Node* sample_opaque = nullptr; 2213 Node *sample_bool = nullptr; 2214 if (n->is_OpaqueNotNull() || n->is_OpaqueInitializedAssertionPredicate()) { 2215 sample_opaque = n; 2216 sample_bool = n->in(1); 2217 assert(sample_bool->is_Bool(), "wrong type"); 2218 } else { 2219 sample_bool = n; 2220 } 2221 Node* sample_cmp = sample_bool->in(1); 2222 const Type* t = Type::TOP; 2223 const TypePtr* at = nullptr; 2224 if (sample_cmp->is_FlatArrayCheck()) { 2225 // Left input of a FlatArrayCheckNode is memory, set the (adr) type of the phi accordingly 2226 assert(sample_cmp->in(1)->bottom_type() == Type::MEMORY, "unexpected input type"); 2227 t = Type::MEMORY; 2228 at = TypeRawPtr::BOTTOM; 2229 } 2230 2231 // Make Phis to merge the Cmp's inputs. 2232 PhiNode *phi1 = new PhiNode(phi->in(0), t, at); 2233 PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP); 2234 for (i = 1; i < phi->req(); i++) { 2235 Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1); 2236 Node *n2 = sample_opaque == nullptr ? 
phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
2237 phi1->set_req(i, n1);
2238 phi2->set_req(i, n2);
2239 phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2240 phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2241 }
2242 // See if these Phis have been made before.
2243 // Register with optimizer
2244 Node *hit1 = _igvn.hash_find_insert(phi1);
2245 if (hit1) { // Hit, toss just made Phi
2246 _igvn.remove_dead_node(phi1); // Remove new phi
2247 assert(hit1->is_Phi(), "" );
2248 phi1 = (PhiNode*)hit1; // Use existing phi
2249 } else { // Miss
2250 _igvn.register_new_node_with_optimizer(phi1);
2251 }
2252 Node *hit2 = _igvn.hash_find_insert(phi2);
2253 if (hit2) { // Hit, toss just made Phi
2254 _igvn.remove_dead_node(phi2); // Remove new phi
2255 assert(hit2->is_Phi(), "" );
2256 phi2 = (PhiNode*)hit2; // Use existing phi
2257 } else { // Miss
2258 _igvn.register_new_node_with_optimizer(phi2);
2259 }
2260 // Register Phis with loop/block info
2261 set_ctrl(phi1, phi->in(0));
2262 set_ctrl(phi2, phi->in(0));
2263 // Make a new Cmp
2264 Node *cmp = sample_cmp->clone();
2265 cmp->set_req(1, phi1);
2266 cmp->set_req(2, phi2);
2267 _igvn.register_new_node_with_optimizer(cmp);
2268 set_ctrl(cmp, phi->in(0));
2269 
2270 // Make a new Bool
2271 Node *b = sample_bool->clone();
2272 b->set_req(1,cmp);
2273 _igvn.register_new_node_with_optimizer(b);
2274 set_ctrl(b, phi->in(0));
2275 
2276 if (sample_opaque != nullptr) {
2277 Node* opaque = sample_opaque->clone();
2278 opaque->set_req(1, b);
2279 _igvn.register_new_node_with_optimizer(opaque);
2280 set_ctrl(opaque, phi->in(0));
2281 return opaque;
2282 }
2283 
2284 assert(b->is_Bool(), "");
2285 return b;
2286 }
2287 
2288 //------------------------------clone_bool-------------------------------------
2289 // Passed in a Phi merging (recursively) some nearly equivalent Cmps.
2290 // "Nearly" because all Nodes have been cloned from the original in the loop,
2291 // but the fall-in edges to the Cmp are different. Clone the Cmps
2292 // through the Phi recursively, and return a Cmp.
2293 CmpNode* PhaseIdealLoop::clone_bool(PhiNode* phi) {
2294 uint i;
2295 // Convert this Phi into a Phi merging Cmps
2296 for( i = 1; i < phi->req(); i++ ) {
2297 Node *b = phi->in(i);
2298 if( b->is_Phi() ) {
2299 _igvn.replace_input_of(phi, i, clone_bool(b->as_Phi()));
2300 } else {
2301 assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" );
2302 }
2303 }
2304 
2305 Node *sample_cmp = phi->in(1);
2306 
2307 // Make Phis to merge the Cmp's inputs.
2308 PhiNode *phi1 = new PhiNode( phi->in(0), Type::TOP );
2309 PhiNode *phi2 = new PhiNode( phi->in(0), Type::TOP );
2310 for( uint j = 1; j < phi->req(); j++ ) {
2311 Node *cmp_top = phi->in(j); // Inputs are all Cmp or TOP
2312 Node *n1, *n2;
2313 if( cmp_top->is_Cmp() ) {
2314 n1 = cmp_top->in(1);
2315 n2 = cmp_top->in(2);
2316 } else {
2317 n1 = n2 = cmp_top;
2318 }
2319 phi1->set_req( j, n1 );
2320 phi2->set_req( j, n2 );
2321 phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2322 phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2323 }
2324 
2325 // See if these Phis have been made before.
2326 // Register with optimizer
2327 Node *hit1 = _igvn.hash_find_insert(phi1);
2328 if( hit1 ) { // Hit, toss just made Phi
2329 _igvn.remove_dead_node(phi1); // Remove new phi
2330 assert( hit1->is_Phi(), "" );
2331 phi1 = (PhiNode*)hit1; // Use existing phi
2332 } else { // Miss
2333 _igvn.register_new_node_with_optimizer(phi1);
2334 }
2335 Node *hit2 = _igvn.hash_find_insert(phi2);
2336 if( hit2 ) { // Hit, toss just made Phi
2337 _igvn.remove_dead_node(phi2); // Remove new phi
2338 assert( hit2->is_Phi(), "" );
2339 phi2 = (PhiNode*)hit2; // Use existing phi
2340 } else { // Miss
2341 _igvn.register_new_node_with_optimizer(phi2);
2342 }
2343 // Register Phis with loop/block info
2344 set_ctrl(phi1, phi->in(0));
2345 set_ctrl(phi2, phi->in(0));
2346 // Make a new Cmp
2347 Node *cmp = sample_cmp->clone();
2348 cmp->set_req( 1, phi1 );
2349 cmp->set_req( 2, phi2 );
2350 _igvn.register_new_node_with_optimizer(cmp);
2351 set_ctrl(cmp, phi->in(0));
2352 
2353 assert( cmp->is_Cmp(), "" );
2354 return (CmpNode*)cmp;
2355 }
2356 
2357 void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
2358 IdealLoopTree* loop, IdealLoopTree* outer_loop,
2359 Node_List*& split_if_set, Node_List*& split_bool_set,
2360 Node_List*& split_cex_set, Node_List& worklist,
2361 uint new_counter, CloneLoopMode mode) {
2362 Node* nnn = old_new[old->_idx];
2363 // Copy uses to a worklist, so I can munge the def-use info
2364 // with impunity.
2365 for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
2366 worklist.push(old->fast_out(j));
2367 
2368 while( worklist.size() ) {
2369 Node *use = worklist.pop();
2370 if (!has_node(use)) continue; // Ignore dead nodes
2371 if (use->in(0) == C->top()) continue;
2372 IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
2373 // Check for data-use outside of loop - at least one of OLD or USE
2374 // must not be a CFG node.
2375 #ifdef ASSERT
2376 if (loop->_head->as_Loop()->is_strip_mined() && outer_loop->is_member(use_loop) && !loop->is_member(use_loop) && old_new[use->_idx] == nullptr) {
2377 Node* sfpt = loop->_head->as_CountedLoop()->outer_safepoint();
2378 assert(mode != IgnoreStripMined, "incorrect cloning mode");
2379 assert((mode == ControlAroundStripMined && use == sfpt) || !use->is_reachable_from_root(), "missed a node");
2380 }
2381 #endif
2382 if (!loop->is_member(use_loop) && !outer_loop->is_member(use_loop) && (!old->is_CFG() || !use->is_CFG())) {
2383 
2384 // If the Data use is an IF, that means we have an IF outside the
2385 // loop that is switching on a condition that is set inside the
2386 // loop. Happens if people set a loop-exit flag; then test the flag
2387 // in the loop to break the loop, then test it again outside the
2388 // loop to determine which way the loop exited.
2389 //
2390 // For several uses we need to make sure that there is no phi between
2391 // the use and the Bool/Cmp. We therefore clone the Bool/Cmp down here
2392 // to avoid such a phi in between.
2393 // For example, it is unexpected that there is a Phi between an
2394 // AllocateArray node and its ValidLengthTest input that could cause
2395 // split if to break.
2396 assert(!use->is_OpaqueTemplateAssertionPredicate(), 2397 "should not clone a Template Assertion Predicate which should be removed once it's useless"); 2398 if (use->is_If() || use->is_CMove() || use->is_OpaqueNotNull() || use->is_OpaqueInitializedAssertionPredicate() || 2399 (use->Opcode() == Op_AllocateArray && use->in(AllocateNode::ValidLengthTest) == old)) { 2400 // Since this code is highly unlikely, we lazily build the worklist 2401 // of such Nodes to go split. 2402 if (!split_if_set) { 2403 split_if_set = new Node_List(); 2404 } 2405 split_if_set->push(use); 2406 } 2407 if (use->is_Bool()) { 2408 if (!split_bool_set) { 2409 split_bool_set = new Node_List(); 2410 } 2411 split_bool_set->push(use); 2412 } 2413 if (use->Opcode() == Op_CreateEx) { 2414 if (!split_cex_set) { 2415 split_cex_set = new Node_List(); 2416 } 2417 split_cex_set->push(use); 2418 } 2419 2420 2421 // Get "block" use is in 2422 uint idx = 0; 2423 while( use->in(idx) != old ) idx++; 2424 Node *prev = use->is_CFG() ? use : get_ctrl(use); 2425 assert(!loop->is_member(get_loop(prev)) && !outer_loop->is_member(get_loop(prev)), "" ); 2426 Node* cfg = (prev->_idx >= new_counter && prev->is_Region()) 2427 ? prev->in(2) 2428 : idom(prev); 2429 if( use->is_Phi() ) // Phi use is in prior block 2430 cfg = prev->in(idx); // NOT in block of Phi itself 2431 if (cfg->is_top()) { // Use is dead? 2432 _igvn.replace_input_of(use, idx, C->top()); 2433 continue; 2434 } 2435 2436 // If use is referenced through control edge... (idx == 0) 2437 if (mode == IgnoreStripMined && idx == 0) { 2438 LoopNode *head = loop->_head->as_Loop(); 2439 if (head->is_strip_mined() && is_dominator(head->outer_loop_exit(), prev)) { 2440 // That node is outside the inner loop, leave it outside the 2441 // outer loop as well to not confuse verification code. 2442 assert(!loop->_parent->is_member(use_loop), "should be out of the outer loop"); 2443 _igvn.replace_input_of(use, 0, head->outer_loop_exit()); 2444 continue; 2445 } 2446 } 2447 2448 while(!outer_loop->is_member(get_loop(cfg))) { 2449 prev = cfg; 2450 cfg = (cfg->_idx >= new_counter && cfg->is_Region()) ? cfg->in(2) : idom(cfg); 2451 } 2452 // If the use occurs after merging several exits from the loop, then 2453 // old value must have dominated all those exits. Since the same old 2454 // value was used on all those exits we did not need a Phi at this 2455 // merge point. NOW we do need a Phi here. Each loop exit value 2456 // is now merged with the peeled body exit; each exit gets its own 2457 // private Phi and those Phis need to be merged here. 2458 Node *phi; 2459 if( prev->is_Region() ) { 2460 if( idx == 0 ) { // Updating control edge? 2461 phi = prev; // Just use existing control 2462 } else { // Else need a new Phi 2463 phi = PhiNode::make( prev, old ); 2464 // Now recursively fix up the new uses of old! 2465 for( uint i = 1; i < prev->req(); i++ ) { 2466 worklist.push(phi); // Onto worklist once for each 'old' input 2467 } 2468 } 2469 } else { 2470 // Get new RegionNode merging old and new loop exits 2471 prev = old_new[prev->_idx]; 2472 assert( prev, "just made this in step 7" ); 2473 if( idx == 0) { // Updating control edge? 
2474 phi = prev; // Just use existing control 2475 } else { // Else need a new Phi 2476 // Make a new Phi merging data values properly 2477 phi = PhiNode::make( prev, old ); 2478 phi->set_req( 1, nnn ); 2479 } 2480 } 2481 // If inserting a new Phi, check for prior hits 2482 if( idx != 0 ) { 2483 Node *hit = _igvn.hash_find_insert(phi); 2484 if( hit == nullptr ) { 2485 _igvn.register_new_node_with_optimizer(phi); // Register new phi 2486 } else { // or 2487 // Remove the new phi from the graph and use the hit 2488 _igvn.remove_dead_node(phi); 2489 phi = hit; // Use existing phi 2490 } 2491 set_ctrl(phi, prev); 2492 } 2493 // Make 'use' use the Phi instead of the old loop body exit value 2494 assert(use->in(idx) == old, "old is still input of use"); 2495 // We notify all uses of old, including use, and the indirect uses, 2496 // that may now be optimized because we have replaced old with phi. 2497 _igvn.add_users_to_worklist(old); 2498 if (idx == 0 && 2499 use->depends_only_on_test()) { 2500 Node* pinned_clone = use->pin_array_access_node(); 2501 if (pinned_clone != nullptr) { 2502 // Pin array access nodes: control is updated here to a region. If, after some transformations, only one path 2503 // into the region is left, an array load could become dependent on a condition that's not a range check for 2504 // that access. If that condition is replaced by an identical dominating one, then an unpinned load would risk 2505 // floating above its range check. 2506 pinned_clone->set_req(0, phi); 2507 register_new_node_with_ctrl_of(pinned_clone, use); 2508 _igvn.replace_node(use, pinned_clone); 2509 continue; 2510 } 2511 } 2512 _igvn.replace_input_of(use, idx, phi); 2513 if( use->_idx >= new_counter ) { // If updating new phis 2514 // Not needed for correctness, but prevents a weak assert 2515 // in AddPNode from tripping (when we end up with different 2516 // base & derived Phis that will become the same after 2517 // IGVN does CSE). 2518 Node *hit = _igvn.hash_find_insert(use); 2519 if( hit ) // Go ahead and re-hash for hits. 
2520 _igvn.replace_node( use, hit ); 2521 } 2522 } 2523 } 2524 } 2525 2526 static void collect_nodes_in_outer_loop_not_reachable_from_sfpt(Node* n, const IdealLoopTree *loop, const IdealLoopTree* outer_loop, 2527 const Node_List &old_new, Unique_Node_List& wq, PhaseIdealLoop* phase, 2528 bool check_old_new) { 2529 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 2530 Node* u = n->fast_out(j); 2531 assert(check_old_new || old_new[u->_idx] == nullptr, "shouldn't have been cloned"); 2532 if (!u->is_CFG() && (!check_old_new || old_new[u->_idx] == nullptr)) { 2533 Node* c = phase->get_ctrl(u); 2534 IdealLoopTree* u_loop = phase->get_loop(c); 2535 assert(!loop->is_member(u_loop) || !loop->_body.contains(u), "can be in outer loop or out of both loops only"); 2536 if (!loop->is_member(u_loop)) { 2537 if (outer_loop->is_member(u_loop)) { 2538 wq.push(u); 2539 } else { 2540 // nodes pinned with control in the outer loop but not referenced from the safepoint must be moved out of 2541 // the outer loop too 2542 Node* u_c = u->in(0); 2543 if (u_c != nullptr) { 2544 IdealLoopTree* u_c_loop = phase->get_loop(u_c); 2545 if (outer_loop->is_member(u_c_loop) && !loop->is_member(u_c_loop)) { 2546 wq.push(u); 2547 } 2548 } 2549 } 2550 } 2551 } 2552 } 2553 } 2554 2555 void PhaseIdealLoop::clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop, 2556 IdealLoopTree* outer_loop, int dd, Node_List &old_new, 2557 Node_List& extra_data_nodes) { 2558 if (head->is_strip_mined() && mode != IgnoreStripMined) { 2559 CountedLoopNode* cl = head->as_CountedLoop(); 2560 Node* l = cl->outer_loop(); 2561 Node* tail = cl->outer_loop_tail(); 2562 IfNode* le = cl->outer_loop_end(); 2563 Node* sfpt = cl->outer_safepoint(); 2564 CountedLoopEndNode* cle = cl->loopexit(); 2565 CountedLoopNode* new_cl = old_new[cl->_idx]->as_CountedLoop(); 2566 CountedLoopEndNode* new_cle = new_cl->as_CountedLoop()->loopexit_or_null(); 2567 Node* cle_out = cle->proj_out(false); 2568 2569 Node* new_sfpt = nullptr; 2570 Node* new_cle_out = cle_out->clone(); 2571 old_new.map(cle_out->_idx, new_cle_out); 2572 if (mode == CloneIncludesStripMined) { 2573 // clone outer loop body 2574 Node* new_l = l->clone(); 2575 Node* new_tail = tail->clone(); 2576 IfNode* new_le = le->clone()->as_If(); 2577 new_sfpt = sfpt->clone(); 2578 2579 set_loop(new_l, outer_loop->_parent); 2580 set_idom(new_l, new_l->in(LoopNode::EntryControl), dd); 2581 set_loop(new_cle_out, outer_loop->_parent); 2582 set_idom(new_cle_out, new_cle, dd); 2583 set_loop(new_sfpt, outer_loop->_parent); 2584 set_idom(new_sfpt, new_cle_out, dd); 2585 set_loop(new_le, outer_loop->_parent); 2586 set_idom(new_le, new_sfpt, dd); 2587 set_loop(new_tail, outer_loop->_parent); 2588 set_idom(new_tail, new_le, dd); 2589 set_idom(new_cl, new_l, dd); 2590 2591 old_new.map(l->_idx, new_l); 2592 old_new.map(tail->_idx, new_tail); 2593 old_new.map(le->_idx, new_le); 2594 old_new.map(sfpt->_idx, new_sfpt); 2595 2596 new_l->set_req(LoopNode::LoopBackControl, new_tail); 2597 new_l->set_req(0, new_l); 2598 new_tail->set_req(0, new_le); 2599 new_le->set_req(0, new_sfpt); 2600 new_sfpt->set_req(0, new_cle_out); 2601 new_cle_out->set_req(0, new_cle); 2602 new_cl->set_req(LoopNode::EntryControl, new_l); 2603 2604 _igvn.register_new_node_with_optimizer(new_l); 2605 _igvn.register_new_node_with_optimizer(new_tail); 2606 _igvn.register_new_node_with_optimizer(new_le); 2607 } else { 2608 Node *newhead = old_new[loop->_head->_idx]; 2609 newhead->as_Loop()->clear_strip_mined(); 2610 
_igvn.replace_input_of(newhead, LoopNode::EntryControl, newhead->in(LoopNode::EntryControl)->in(LoopNode::EntryControl));
2611 set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
2612 }
2613 // Look at data nodes that were assigned a control in the outer
2614 // loop: they are kept in the outer loop by the safepoint so start
2615 // from the safepoint node's inputs.
2616 IdealLoopTree* outer_loop = get_loop(l);
2617 Node_Stack stack(2);
2618 stack.push(sfpt, 1);
2619 uint new_counter = C->unique();
2620 while (stack.size() > 0) {
2621 Node* n = stack.node();
2622 uint i = stack.index();
2623 while (i < n->req() &&
2624 (n->in(i) == nullptr ||
2625 !has_ctrl(n->in(i)) ||
2626 get_loop(get_ctrl(n->in(i))) != outer_loop ||
2627 (old_new[n->in(i)->_idx] != nullptr && old_new[n->in(i)->_idx]->_idx >= new_counter))) {
2628 i++;
2629 }
2630 if (i < n->req()) {
2631 stack.set_index(i+1);
2632 stack.push(n->in(i), 0);
2633 } else {
2634 assert(old_new[n->_idx] == nullptr || n == sfpt || old_new[n->_idx]->_idx < new_counter, "no clone yet");
2635 Node* m = n == sfpt ? new_sfpt : n->clone();
2636 if (m != nullptr) {
2637 for (uint i = 0; i < n->req(); i++) {
2638 if (m->in(i) != nullptr && old_new[m->in(i)->_idx] != nullptr) {
2639 m->set_req(i, old_new[m->in(i)->_idx]);
2640 }
2641 }
2642 } else {
2643 assert(n == sfpt && mode != CloneIncludesStripMined, "where's the safepoint clone?");
2644 }
2645 if (n != sfpt) {
2646 extra_data_nodes.push(n);
2647 _igvn.register_new_node_with_optimizer(m);
2648 assert(get_ctrl(n) == cle_out, "what other control?");
2649 set_ctrl(m, new_cle_out);
2650 old_new.map(n->_idx, m);
2651 }
2652 stack.pop();
2653 }
2654 }
2655 if (mode == CloneIncludesStripMined) {
2656 _igvn.register_new_node_with_optimizer(new_sfpt);
2657 _igvn.register_new_node_with_optimizer(new_cle_out);
2658 }
2659 // Some other transformation may have pessimistically assigned some
2660 // data nodes to the outer loop. Set their control so they are out
2661 // of the outer loop.
2662 ResourceMark rm;
2663 Unique_Node_List wq;
2664 for (uint i = 0; i < extra_data_nodes.size(); i++) {
2665 Node* old = extra_data_nodes.at(i);
2666 collect_nodes_in_outer_loop_not_reachable_from_sfpt(old, loop, outer_loop, old_new, wq, this, true);
2667 }
2668 
2669 for (uint i = 0; i < loop->_body.size(); i++) {
2670 Node* old = loop->_body.at(i);
2671 collect_nodes_in_outer_loop_not_reachable_from_sfpt(old, loop, outer_loop, old_new, wq, this, true);
2672 }
2673 
2674 Node* inner_out = sfpt->in(0);
2675 if (inner_out->outcnt() > 1) {
2676 collect_nodes_in_outer_loop_not_reachable_from_sfpt(inner_out, loop, outer_loop, old_new, wq, this, true);
2677 }
2678 
2679 Node* new_ctrl = cl->outer_loop_exit();
2680 assert(get_loop(new_ctrl) != outer_loop, "must be out of the loop nest");
2681 for (uint i = 0; i < wq.size(); i++) {
2682 Node* n = wq.at(i);
2683 set_ctrl(n, new_ctrl);
2684 if (n->in(0) != nullptr) {
2685 _igvn.replace_input_of(n, 0, new_ctrl);
2686 }
2687 collect_nodes_in_outer_loop_not_reachable_from_sfpt(n, loop, outer_loop, old_new, wq, this, false);
2688 }
2689 } else {
2690 Node *newhead = old_new[loop->_head->_idx];
2691 set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
2692 }
2693 }
2694 
2695 //------------------------------clone_loop-------------------------------------
2696 //
2697 // C L O N E A L O O P B O D Y
2698 //
2699 // This is the basic building block of the loop optimizations. It clones an
2700 // entire loop body.
It makes an old_new loop body mapping; with this mapping
2701 // you can find the new-loop equivalent to an old-loop node. All new-loop
2702 // nodes are exactly equal to their old-loop counterparts, all edges are the
2703 // same. All exits from the old-loop now have a RegionNode that merges the
2704 // equivalent new-loop path. This is true even for the normal "loop-exit"
2705 // condition. All uses of loop-invariant old-loop values now come from (one
2706 // or more) Phis that merge their new-loop equivalents.
2707 //
2708 // This operation leaves the graph in an illegal state: there are two valid
2709 // control edges coming from the loop pre-header to both loop bodies. I'll
2710 // definitely have to hack the graph after running this transform.
2711 //
2712 // From this building block I will further edit edges to perform loop peeling
2713 // or loop unrolling or iteration splitting (Range-Check-Elimination), etc.
2714 //
2715 // Parameter side_by_side_idom:
2716 // When side_by_side_idom is null, the dominator tree is constructed for
2717 // the clone loop to dominate the original. Used in construction of
2718 // pre-main-post loop sequence.
2719 // When nonnull, the clone and original are side-by-side, both are
2720 // dominated by the side_by_side_idom node. Used in construction of
2721 // unswitched loops.
2722 void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd,
2723 CloneLoopMode mode, Node* side_by_side_idom) {
2724 
2725 LoopNode* head = loop->_head->as_Loop();
2726 head->verify_strip_mined(1);
2727 
2728 if (C->do_vector_loop() && PrintOpto) {
2729 const char* mname = C->method()->name()->as_quoted_ascii();
2730 if (mname != nullptr) {
2731 tty->print("PhaseIdealLoop::clone_loop: for vectorize method %s\n", mname);
2732 }
2733 }
2734 
2735 CloneMap& cm = C->clone_map();
2736 if (C->do_vector_loop()) {
2737 cm.set_clone_idx(cm.max_gen()+1);
2738 #ifndef PRODUCT
2739 if (PrintOpto) {
2740 tty->print_cr("PhaseIdealLoop::clone_loop: _clone_idx %d", cm.clone_idx());
2741 loop->dump_head();
2742 }
2743 #endif
2744 }
2745 
2746 // Step 1: Clone the loop body. Make the old->new mapping.
2747 clone_loop_body(loop->_body, old_new, &cm);
2748 
2749 IdealLoopTree* outer_loop = (head->is_strip_mined() && mode != IgnoreStripMined) ? get_loop(head->as_CountedLoop()->outer_loop()) : loop;
2750 
2751 // Step 2: Fix the edges in the new body. If the old input is outside the
2752 // loop use it. If the old input is INside the loop, use the corresponding
2753 // new node instead.
2754 fix_body_edges(loop->_body, loop, old_new, dd, outer_loop->_parent, false);
2755 
2756 Node_List extra_data_nodes; // data nodes in the outer strip mined loop
2757 clone_outer_loop(head, mode, loop, outer_loop, dd, old_new, extra_data_nodes);
2758 
2759 // Step 3: Now fix control uses. Loop varying control uses have already
2760 // been fixed up (as part of all input edges in Step 2). Loop invariant
2761 // control uses must be either an IfFalse or an IfTrue. Make a merge
2762 // point to merge the old and new IfFalse/IfTrue nodes; make the use
2763 // refer to this.
2764 Node_List worklist;
2765 uint new_counter = C->unique();
2766 fix_ctrl_uses(loop->_body, loop, old_new, mode, side_by_side_idom, &cm, worklist);
2767 
2768 // Step 4: If loop-invariant use is not control, it must be dominated by a
2769 // loop exit IfFalse/IfTrue. Find "proper" loop exit. Make a Region
2770 // there if needed. Make a Phi there merging old and new used values.
2771 Node_List *split_if_set = nullptr;
2772 Node_List *split_bool_set = nullptr;
2773 Node_List *split_cex_set = nullptr;
2774 fix_data_uses(loop->_body, loop, mode, outer_loop, new_counter, old_new, worklist, split_if_set, split_bool_set, split_cex_set);
2775 
2776 for (uint i = 0; i < extra_data_nodes.size(); i++) {
2777 Node* old = extra_data_nodes.at(i);
2778 clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
2779 split_bool_set, split_cex_set, worklist, new_counter,
2780 mode);
2781 }
2782 
2783 // Check for IFs that need splitting/cloning. Happens if an IF outside of
2784 // the loop uses a condition set in the loop. The original IF probably
2785 // takes control from one or more OLD Regions (which in turn get control
2786 // from NEW Regions). In any case, there will be a set of Phis for each merge
2787 // point from the IF up to where the original BOOL def exits the loop.
2788 finish_clone_loop(split_if_set, split_bool_set, split_cex_set);
2789 
2790 }
2791 
2792 void PhaseIdealLoop::finish_clone_loop(Node_List* split_if_set, Node_List* split_bool_set, Node_List* split_cex_set) {
2793 if (split_if_set) {
2794 while (split_if_set->size()) {
2795 Node *iff = split_if_set->pop();
2796 uint input = iff->Opcode() == Op_AllocateArray ? AllocateNode::ValidLengthTest : 1;
2797 if (iff->in(input)->is_Phi()) {
2798 Node *b = clone_iff(iff->in(input)->as_Phi());
2799 _igvn.replace_input_of(iff, input, b);
2800 }
2801 }
2802 }
2803 if (split_bool_set) {
2804 while (split_bool_set->size()) {
2805 Node *b = split_bool_set->pop();
2806 Node *phi = b->in(1);
2807 assert(phi->is_Phi(), "");
2808 CmpNode *cmp = clone_bool((PhiNode*) phi);
2809 _igvn.replace_input_of(b, 1, cmp);
2810 }
2811 }
2812 if (split_cex_set) {
2813 while (split_cex_set->size()) {
2814 Node *b = split_cex_set->pop();
2815 assert(b->in(0)->is_Region(), "");
2816 assert(b->in(1)->is_Phi(), "");
2817 assert(b->in(0)->in(0) == b->in(1)->in(0), "");
2818 split_up(b, b->in(0), nullptr);
2819 }
2820 }
2821 }
2822 
2823 void PhaseIdealLoop::fix_data_uses(Node_List& body, IdealLoopTree* loop, CloneLoopMode mode, IdealLoopTree* outer_loop,
2824 uint new_counter, Node_List &old_new, Node_List &worklist, Node_List*& split_if_set,
2825 Node_List*& split_bool_set, Node_List*& split_cex_set) {
2826 for(uint i = 0; i < body.size(); i++ ) {
2827 Node* old = body.at(i);
2828 clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
2829 split_bool_set, split_cex_set, worklist, new_counter,
2830 mode);
2831 }
2832 }
2833 
2834 void PhaseIdealLoop::fix_ctrl_uses(const Node_List& body, const IdealLoopTree* loop, Node_List &old_new, CloneLoopMode mode,
2835 Node* side_by_side_idom, CloneMap* cm, Node_List &worklist) {
2836 LoopNode* head = loop->_head->as_Loop();
2837 for(uint i = 0; i < body.size(); i++ ) {
2838 Node* old = body.at(i);
2839 if( !old->is_CFG() ) continue;
2840 
2841 // Copy uses to a worklist, so I can munge the def-use info
2842 // with impunity.
2843 for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++) {
2844 worklist.push(old->fast_out(j));
2845 }
2846 
2847 while (worklist.size()) { // Visit all uses
2848 Node *use = worklist.pop();
2849 if (!has_node(use)) continue; // Ignore dead nodes
2850 IdealLoopTree *use_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use );
2851 if (!loop->is_member(use_loop) && use->is_CFG()) {
2852 // Both OLD and USE are CFG nodes here.
2853 assert(use->is_Proj(), "" ); 2854 Node* nnn = old_new[old->_idx]; 2855 2856 Node* newuse = nullptr; 2857 if (head->is_strip_mined() && mode != IgnoreStripMined) { 2858 CountedLoopNode* cl = head->as_CountedLoop(); 2859 CountedLoopEndNode* cle = cl->loopexit(); 2860 Node* cle_out = cle->proj_out_or_null(false); 2861 if (use == cle_out) { 2862 IfNode* le = cl->outer_loop_end(); 2863 use = le->proj_out(false); 2864 use_loop = get_loop(use); 2865 if (mode == CloneIncludesStripMined) { 2866 nnn = old_new[le->_idx]; 2867 } else { 2868 newuse = old_new[cle_out->_idx]; 2869 } 2870 } 2871 } 2872 if (newuse == nullptr) { 2873 newuse = use->clone(); 2874 } 2875 2876 // Clone the loop exit control projection 2877 if (C->do_vector_loop() && cm != nullptr) { 2878 cm->verify_insert_and_clone(use, newuse, cm->clone_idx()); 2879 } 2880 newuse->set_req(0,nnn); 2881 _igvn.register_new_node_with_optimizer(newuse); 2882 set_loop(newuse, use_loop); 2883 set_idom(newuse, nnn, dom_depth(nnn) + 1 ); 2884 2885 // We need a Region to merge the exit from the peeled body and the 2886 // exit from the old loop body. 2887 RegionNode *r = new RegionNode(3); 2888 uint dd_r = MIN2(dom_depth(newuse), dom_depth(use)); 2889 assert(dd_r >= dom_depth(dom_lca(newuse, use)), "" ); 2890 2891 // The original user of 'use' uses 'r' instead. 2892 for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) { 2893 Node* useuse = use->last_out(l); 2894 _igvn.rehash_node_delayed(useuse); 2895 uint uses_found = 0; 2896 if (useuse->in(0) == use) { 2897 useuse->set_req(0, r); 2898 uses_found++; 2899 if (useuse->is_CFG()) { 2900 // This is not a dom_depth > dd_r because when new 2901 // control flow is constructed by a loop opt, a node and 2902 // its dominator can end up at the same dom_depth 2903 assert(dom_depth(useuse) >= dd_r, ""); 2904 set_idom(useuse, r, dom_depth(useuse)); 2905 } 2906 } 2907 for (uint k = 1; k < useuse->req(); k++) { 2908 if( useuse->in(k) == use ) { 2909 useuse->set_req(k, r); 2910 uses_found++; 2911 if (useuse->is_Loop() && k == LoopNode::EntryControl) { 2912 // This is not a dom_depth > dd_r because when new 2913 // control flow is constructed by a loop opt, a node 2914 // and its dominator can end up at the same dom_depth 2915 assert(dom_depth(useuse) >= dd_r , ""); 2916 set_idom(useuse, r, dom_depth(useuse)); 2917 } 2918 } 2919 } 2920 l -= uses_found; // we deleted 1 or more copies of this edge 2921 } 2922 2923 assert(use->is_Proj(), "loop exit should be projection"); 2924 // lazy_replace() below moves all nodes that are: 2925 // - control dependent on the loop exit or 2926 // - have control set to the loop exit 2927 // below the post-loop merge point. lazy_replace() takes a dead control as first input. To make it 2928 // possible to use it, the loop exit projection is cloned and becomes the new exit projection. The initial one 2929 // becomes dead and is "replaced" by the region. 2930 Node* use_clone = use->clone(); 2931 register_control(use_clone, use_loop, idom(use), dom_depth(use)); 2932 // Now finish up 'r' 2933 r->set_req(1, newuse); 2934 r->set_req(2, use_clone); 2935 _igvn.register_new_node_with_optimizer(r); 2936 set_loop(r, use_loop); 2937 set_idom(r, (side_by_side_idom == nullptr) ? 
newuse->in(0) : side_by_side_idom, dd_r); 2938 lazy_replace(use, r); 2939 // Map the (cloned) old use to the new merge point 2940 old_new.map(use_clone->_idx, r); 2941 } // End of if a loop-exit test 2942 } 2943 } 2944 } 2945 2946 void PhaseIdealLoop::fix_body_edges(const Node_List &body, IdealLoopTree* loop, const Node_List &old_new, int dd, 2947 IdealLoopTree* parent, bool partial) { 2948 for(uint i = 0; i < body.size(); i++ ) { 2949 Node *old = body.at(i); 2950 Node *nnn = old_new[old->_idx]; 2951 // Fix CFG/Loop controlling the new node 2952 if (has_ctrl(old)) { 2953 set_ctrl(nnn, old_new[get_ctrl(old)->_idx]); 2954 } else { 2955 set_loop(nnn, parent); 2956 if (old->outcnt() > 0) { 2957 Node* dom = idom(old); 2958 if (old_new[dom->_idx] != nullptr) { 2959 dom = old_new[dom->_idx]; 2960 set_idom(nnn, dom, dd ); 2961 } 2962 } 2963 } 2964 // Correct edges to the new node 2965 for (uint j = 0; j < nnn->req(); j++) { 2966 Node *n = nnn->in(j); 2967 if (n != nullptr) { 2968 IdealLoopTree *old_in_loop = get_loop(has_ctrl(n) ? get_ctrl(n) : n); 2969 if (loop->is_member(old_in_loop)) { 2970 if (old_new[n->_idx] != nullptr) { 2971 nnn->set_req(j, old_new[n->_idx]); 2972 } else { 2973 assert(!body.contains(n), ""); 2974 assert(partial, "node not cloned"); 2975 } 2976 } 2977 } 2978 } 2979 _igvn.hash_find_insert(nnn); 2980 } 2981 } 2982 2983 void PhaseIdealLoop::clone_loop_body(const Node_List& body, Node_List &old_new, CloneMap* cm) { 2984 for (uint i = 0; i < body.size(); i++) { 2985 Node* old = body.at(i); 2986 Node* nnn = old->clone(); 2987 old_new.map(old->_idx, nnn); 2988 if (C->do_vector_loop() && cm != nullptr) { 2989 cm->verify_insert_and_clone(old, nnn, cm->clone_idx()); 2990 } 2991 _igvn.register_new_node_with_optimizer(nnn); 2992 } 2993 } 2994 2995 2996 //---------------------- stride_of_possible_iv ------------------------------------- 2997 // Looks for an iff/bool/comp with one operand of the compare 2998 // being a cycle involving an add and a phi, 2999 // with an optional truncation (left-shift followed by a right-shift) 3000 // of the add. Returns zero if not an iv. 3001 int PhaseIdealLoop::stride_of_possible_iv(Node* iff) { 3002 Node* trunc1 = nullptr; 3003 Node* trunc2 = nullptr; 3004 const TypeInteger* ttype = nullptr; 3005 if (!iff->is_If() || iff->in(1) == nullptr || !iff->in(1)->is_Bool()) { 3006 return 0; 3007 } 3008 BoolNode* bl = iff->in(1)->as_Bool(); 3009 Node* cmp = bl->in(1); 3010 if (!cmp || (cmp->Opcode() != Op_CmpI && cmp->Opcode() != Op_CmpU)) { 3011 return 0; 3012 } 3013 // Must have an invariant operand 3014 if (is_member(get_loop(iff), get_ctrl(cmp->in(2)))) { 3015 return 0; 3016 } 3017 Node* add2 = nullptr; 3018 Node* cmp1 = cmp->in(1); 3019 if (cmp1->is_Phi()) { 3020 // (If (Bool (CmpX phi:(Phi ...(Optional-trunc(AddI phi add2))) ))) 3021 Node* phi = cmp1; 3022 for (uint i = 1; i < phi->req(); i++) { 3023 Node* in = phi->in(i); 3024 Node* add = CountedLoopNode::match_incr_with_optional_truncation(in, 3025 &trunc1, &trunc2, &ttype, T_INT); 3026 if (add && add->in(1) == phi) { 3027 add2 = add->in(2); 3028 break; 3029 } 3030 } 3031 } else { 3032 // (If (Bool (CmpX addtrunc:(Optional-trunc((AddI (Phi ...addtrunc...) 
add2)) ))) 3033 Node* addtrunc = cmp1; 3034 Node* add = CountedLoopNode::match_incr_with_optional_truncation(addtrunc, 3035 &trunc1, &trunc2, &ttype, T_INT); 3036 if (add && add->in(1)->is_Phi()) { 3037 Node* phi = add->in(1); 3038 for (uint i = 1; i < phi->req(); i++) { 3039 if (phi->in(i) == addtrunc) { 3040 add2 = add->in(2); 3041 break; 3042 } 3043 } 3044 } 3045 } 3046 if (add2 != nullptr) { 3047 const TypeInt* add2t = _igvn.type(add2)->is_int(); 3048 if (add2t->is_con()) { 3049 return add2t->get_con(); 3050 } 3051 } 3052 return 0; 3053 } 3054 3055 3056 //---------------------- stay_in_loop ------------------------------------- 3057 // Return the (unique) control output node that's in the loop (if it exists.) 3058 Node* PhaseIdealLoop::stay_in_loop( Node* n, IdealLoopTree *loop) { 3059 Node* unique = nullptr; 3060 if (!n) return nullptr; 3061 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3062 Node* use = n->fast_out(i); 3063 if (!has_ctrl(use) && loop->is_member(get_loop(use))) { 3064 if (unique != nullptr) { 3065 return nullptr; 3066 } 3067 unique = use; 3068 } 3069 } 3070 return unique; 3071 } 3072 3073 //------------------------------ register_node ------------------------------------- 3074 // Utility to register node "n" with PhaseIdealLoop 3075 void PhaseIdealLoop::register_node(Node* n, IdealLoopTree* loop, Node* pred, uint ddepth) { 3076 _igvn.register_new_node_with_optimizer(n); 3077 loop->_body.push(n); 3078 if (n->is_CFG()) { 3079 set_loop(n, loop); 3080 set_idom(n, pred, ddepth); 3081 } else { 3082 set_ctrl(n, pred); 3083 } 3084 } 3085 3086 //------------------------------ proj_clone ------------------------------------- 3087 // Utility to create an if-projection 3088 ProjNode* PhaseIdealLoop::proj_clone(ProjNode* p, IfNode* iff) { 3089 ProjNode* c = p->clone()->as_Proj(); 3090 c->set_req(0, iff); 3091 return c; 3092 } 3093 3094 //------------------------------ short_circuit_if ------------------------------------- 3095 // Force the iff control output to be the live_proj 3096 Node* PhaseIdealLoop::short_circuit_if(IfNode* iff, ProjNode* live_proj) { 3097 guarantee(live_proj != nullptr, "null projection"); 3098 int proj_con = live_proj->_con; 3099 assert(proj_con == 0 || proj_con == 1, "false or true projection"); 3100 Node* con = intcon(proj_con); 3101 if (iff) { 3102 iff->set_req(1, con); 3103 } 3104 return con; 3105 } 3106 3107 //------------------------------ insert_if_before_proj ------------------------------------- 3108 // Insert a new if before an if projection (* - new node) 3109 // 3110 // before 3111 // if(test) 3112 // / \ 3113 // v v 3114 // other-proj proj (arg) 3115 // 3116 // after 3117 // if(test) 3118 // / \ 3119 // / v 3120 // | * proj-clone 3121 // v | 3122 // other-proj v 3123 // * new_if(relop(cmp[IU](left,right))) 3124 // / \ 3125 // v v 3126 // * new-proj proj 3127 // (returned) 3128 // 3129 ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj) { 3130 IfNode* iff = proj->in(0)->as_If(); 3131 IdealLoopTree *loop = get_loop(proj); 3132 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj(); 3133 uint ddepth = dom_depth(proj); 3134 3135 _igvn.rehash_node_delayed(iff); 3136 _igvn.rehash_node_delayed(proj); 3137 3138 proj->set_req(0, nullptr); // temporary disconnect 3139 ProjNode* proj2 = proj_clone(proj, iff); 3140 register_node(proj2, loop, iff, ddepth); 3141 3142 Node* cmp = Signed ? 
(Node*) new CmpINode(left, right) : (Node*) new CmpUNode(left, right); 3143 register_node(cmp, loop, proj2, ddepth); 3144 3145 BoolNode* bol = new BoolNode(cmp, relop); 3146 register_node(bol, loop, proj2, ddepth); 3147 3148 int opcode = iff->Opcode(); 3149 assert(opcode == Op_If || opcode == Op_RangeCheck, "unexpected opcode"); 3150 IfNode* new_if = IfNode::make_with_same_profile(iff, proj2, bol); 3151 register_node(new_if, loop, proj2, ddepth); 3152 3153 proj->set_req(0, new_if); // reattach 3154 set_idom(proj, new_if, ddepth); 3155 3156 ProjNode* new_exit = proj_clone(other_proj, new_if)->as_Proj(); 3157 guarantee(new_exit != nullptr, "null exit node"); 3158 register_node(new_exit, get_loop(other_proj), new_if, ddepth); 3159 3160 return new_exit; 3161 } 3162 3163 //------------------------------ insert_region_before_proj ------------------------------------- 3164 // Insert a region before an if projection (* - new node) 3165 // 3166 // before 3167 // if(test) 3168 // / | 3169 // v | 3170 // proj v 3171 // other-proj 3172 // 3173 // after 3174 // if(test) 3175 // / | 3176 // v | 3177 // * proj-clone v 3178 // | other-proj 3179 // v 3180 // * new-region 3181 // | 3182 // v 3183 // * dum_if 3184 // / \ 3185 // v \ 3186 // * dum-proj v 3187 // proj 3188 // 3189 RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) { 3190 IfNode* iff = proj->in(0)->as_If(); 3191 IdealLoopTree *loop = get_loop(proj); 3192 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj(); 3193 uint ddepth = dom_depth(proj); 3194 3195 _igvn.rehash_node_delayed(iff); 3196 _igvn.rehash_node_delayed(proj); 3197 3198 proj->set_req(0, nullptr); // temporary disconnect 3199 ProjNode* proj2 = proj_clone(proj, iff); 3200 register_node(proj2, loop, iff, ddepth); 3201 3202 RegionNode* reg = new RegionNode(2); 3203 reg->set_req(1, proj2); 3204 register_node(reg, loop, iff, ddepth); 3205 3206 IfNode* dum_if = new IfNode(reg, short_circuit_if(nullptr, proj), iff->_prob, iff->_fcnt); 3207 register_node(dum_if, loop, reg, ddepth); 3208 3209 proj->set_req(0, dum_if); // reattach 3210 set_idom(proj, dum_if, ddepth); 3211 3212 ProjNode* dum_proj = proj_clone(other_proj, dum_if); 3213 register_node(dum_proj, loop, dum_if, ddepth); 3214 3215 return reg; 3216 } 3217 3218 // Idea 3219 // ---- 3220 // Partial Peeling tries to rotate the loop in such a way that it can later be turned into a counted loop. Counted loops 3221 // require a signed loop exit test. When calling this method, we've only found a suitable unsigned test to partial peel 3222 // with. 
Therefore, we try to split off a signed loop exit test from the unsigned test such that it can be used as the new
// loop exit while keeping the unsigned test unchanged and preserving the same behavior as if we had used the unsigned
// test alone instead:
//
// Before Partial Peeling:
//   Loop:
//     <peeled section>
//     Split off signed loop exit test
//     <-- CUT HERE -->
//     Unchanged unsigned loop exit test
//     <rest of unpeeled section>
//     goto Loop
//
// After Partial Peeling:
//   <cloned peeled section>
//   Cloned split off signed loop exit test
//   Loop:
//     Unchanged unsigned loop exit test
//     <rest of unpeeled section>
//     <peeled section>
//     Split off signed loop exit test
//     goto Loop
//
// Details
// -------
// Before:
//   if (i <u limit)    Unsigned loop exit condition
//  /       |
// v        v
// exit-proj  stay-in-loop-proj
//
// Split off a signed loop exit test (i.e. with CmpI) from an unsigned loop exit test (i.e. with CmpU) and insert it
// before the CmpU on the stay-in-loop path and keep both tests:
//
//   if (i <  limit)    Signed loop exit test
//  /       |
// /   if (i <u limit)  Unsigned loop exit test
// /  /      |
// v  v      v
// exit-region  stay-in-loop-proj
//
// Implementation
// --------------
// We need to make sure that the new signed loop exit test is properly inserted into the graph such that the unsigned
// loop exit test still dominates the same set of control nodes, the ctrl() relation from data nodes to both loop
// exit tests is preserved, and their loop nesting is correct.
//
// To achieve that, we clone the unsigned loop exit test completely (leave it unchanged), insert the signed loop exit
// test above it and kill the original unsigned loop exit test by setting its condition to a constant
// (i.e. stay-in-loop-const in the graph below) such that IGVN can fold it later:
//
//       if (stay-in-loop-const)  Killed original unsigned loop exit test
//      /       |
//     /        v
//    /  if (i < limit)   Split off signed loop exit test
//   /  /       |
//  /  /        v
// /  /  if (i <u limit)  Cloned unsigned loop exit test
// v  v  /      |
// exit-region  |
//      |       |
//   dummy-if   |
//    /  |      |
// dead  |      |
//       v      v
// exit-proj  stay-in-loop-proj
//
// Note: The dummy-if is inserted to create a region to merge the loop exits of the original (to be killed) unsigned
// loop exit test and its exit projection while keeping the exit projection (also see insert_region_before_proj()).
//
// Requirements
// ------------
// Note that we can only split off a signed loop exit test from the unsigned loop exit test when the behavior is exactly
// the same as before with only a single unsigned test. This is only possible if certain requirements are met.
// Otherwise, we need to bail out (see comments in the code below).
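//
// Worked example (illustrative, concrete numbers only; the general proof follows in the code below):
// with stride > 0, limit = 10 and i counting up from 0, the unsigned exit test (i >=u 10) and the
// split off signed test (i >= 10) first become true at the same iteration, i = 10, so adding the
// signed test does not change behavior. If the limit could be negative, say limit = -2 and i = -3,
// the signed exit for stride < 0 (i < 0) would fire although the unsigned test does not:
//   (uint) -3 = 0xFFFFFFFD  and  (uint) -2 = 0xFFFFFFFE,  so  -3 >=u -2  is false.
// This is why we bail out below unless the limit is known to be non-negative.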
IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree* loop) {
  const bool Signed   = true;
  const bool Unsigned = false;

  BoolNode* bol = if_cmpu->in(1)->as_Bool();
  if (bol->_test._test != BoolTest::lt) {
    return nullptr;
  }
  CmpNode* cmpu = bol->in(1)->as_Cmp();
  assert(cmpu->Opcode() == Op_CmpU, "must be unsigned comparison");

  int stride = stride_of_possible_iv(if_cmpu);
  if (stride == 0) {
    return nullptr;
  }

  Node* lp_proj = stay_in_loop(if_cmpu, loop);
  guarantee(lp_proj != nullptr, "null loop node");

  ProjNode* lp_continue = lp_proj->as_Proj();
  ProjNode* lp_exit = if_cmpu->proj_out(!lp_continue->is_IfTrue())->as_Proj();
  if (!lp_exit->is_IfFalse()) {
    // The loop exit condition is (i <u limit) ==> (i >= 0 && i < limit).
    // We therefore can't add a single exit condition.
    return nullptr;
  }
  // The unsigned loop exit condition is
  //   !(i <u  limit)
  // =   i >=u limit
  //
  // First, we note that for any x for which
  //   0 <= x <= INT_MAX
  // we can convert x to an unsigned int and still get the same guarantee:
  //   0 <=  (uint) x <=  INT_MAX = (uint) INT_MAX
  //   0 <=u (uint) x <=u INT_MAX = (uint) INT_MAX   (LEMMA)
  //
  // With that in mind, if
  //   limit >= 0             (COND)
  // then the unsigned loop exit condition
  //   i >=u limit            (ULE)
  // is equivalent to
  //   i < 0 || i >= limit    (SLE-full)
  // because either i is negative and therefore always greater than MAX_INT when converted to unsigned
  //   (uint) i >=u MAX_INT >= limit >= 0
  // or otherwise
  //   i >= limit >= 0
  // holds due to (LEMMA).
  //
  // For completeness, a counterexample with limit < 0:
  // Assume i = -3 and limit = -2:
  //   i  < 0
  //   -3 < 0
  // is true and thus also "i < 0 || i >= limit". But
  //   i  >=u limit
  //   -3 >=u -2
  // is false.
  Node* limit = cmpu->in(2);
  const TypeInt* type_limit = _igvn.type(limit)->is_int();
  if (type_limit->_lo < 0) {
    return nullptr;
  }

  // We prove below that we can extract a single signed loop exit condition from (SLE-full), depending on the stride:
  //   stride < 0:
  //     i < 0        (SLE = SLE-negative)
  //   stride > 0:
  //     i >= limit   (SLE = SLE-positive)
  // such that we have the following graph before Partial Peeling with stride > 0 (similar for stride < 0):
  //
  // Loop:
  //   <peeled section>
  //   i >= limit (SLE-positive)
  //   <-- CUT HERE -->
  //   i >=u limit (ULE)
  //   <rest of unpeeled section>
  //   goto Loop
  //
  // We exit the loop if:
  //   (SLE) is true OR (ULE) is true
  // However, if (SLE) is true then (ULE) also needs to be true to ensure the exact same behavior. Otherwise, we wrongly
  // exit a loop that should not have been exited if we did not apply Partial Peeling. More formally, we need to ensure:
  //   (SLE) IMPLIES (ULE)
  // This indeed holds when (COND) is given:
  // - stride > 0:
  //     i >=  limit             // (SLE = SLE-positive)
  //     i >=  limit >= 0        // (COND)
  //     i >=u limit >= 0        // (LEMMA)
  //   which is the unsigned loop exit condition (ULE).
3386 // - stride < 0: 3387 // i < 0 // (SLE = SLE-negative) 3388 // (uint) i >u MAX_INT // (NEG) all negative values are greater than MAX_INT when converted to unsigned 3389 // MAX_INT >= limit >= 0 // (COND) 3390 // MAX_INT >=u limit >= 0 // (LEMMA) 3391 // and thus from (NEG) and (LEMMA): 3392 // i >=u limit 3393 // which is the unsigned loop exit condition (ULE). 3394 // 3395 // 3396 // After Partial Peeling, we have the following structure for stride > 0 (similar for stride < 0): 3397 // <cloned peeled section> 3398 // i >= limit (SLE-positive) 3399 // Loop: 3400 // i >=u limit (ULE) 3401 // <rest of unpeeled section> 3402 // <peeled section> 3403 // i >= limit (SLE-positive) 3404 // goto Loop 3405 Node* rhs_cmpi; 3406 if (stride > 0) { 3407 rhs_cmpi = limit; // For i >= limit 3408 } else { 3409 rhs_cmpi = makecon(TypeInt::ZERO); // For i < 0 3410 } 3411 // Create a new region on the exit path 3412 RegionNode* reg = insert_region_before_proj(lp_exit); 3413 guarantee(reg != nullptr, "null region node"); 3414 3415 // Clone the if-cmpu-true-false using a signed compare 3416 BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge; 3417 ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, rhs_cmpi, lp_continue); 3418 reg->add_req(cmpi_exit); 3419 3420 // Clone the if-cmpu-true-false 3421 BoolTest::mask rel_u = bol->_test._test; 3422 ProjNode* cmpu_exit = insert_if_before_proj(cmpu->in(1), Unsigned, rel_u, cmpu->in(2), lp_continue); 3423 reg->add_req(cmpu_exit); 3424 3425 // Force original if to stay in loop. 3426 short_circuit_if(if_cmpu, lp_continue); 3427 3428 return cmpi_exit->in(0)->as_If(); 3429 } 3430 3431 //------------------------------ remove_cmpi_loop_exit ------------------------------------- 3432 // Remove a previously inserted signed compare loop exit. 3433 void PhaseIdealLoop::remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop) { 3434 Node* lp_proj = stay_in_loop(if_cmp, loop); 3435 assert(if_cmp->in(1)->in(1)->Opcode() == Op_CmpI && 3436 stay_in_loop(lp_proj, loop)->is_If() && 3437 stay_in_loop(lp_proj, loop)->in(1)->in(1)->Opcode() == Op_CmpU, "inserted cmpi before cmpu"); 3438 Node* con = makecon(lp_proj->is_IfTrue() ? TypeInt::ONE : TypeInt::ZERO); 3439 if_cmp->set_req(1, con); 3440 } 3441 3442 //------------------------------ scheduled_nodelist ------------------------------------- 3443 // Create a post order schedule of nodes that are in the 3444 // "member" set. The list is returned in "sched". 3445 // The first node in "sched" is the loop head, followed by 3446 // nodes which have no inputs in the "member" set, and then 3447 // followed by the nodes that have an immediate input dependence 3448 // on a node in "sched". 
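// Implementation note (for clarity): the walk below is an iterative DFS over def->use edges
// restricted to the "member" set. The node currently being visited and its output index are
// cached in "n" and "idx", while "nstack" holds the rest of the DFS path; a node is appended
// to "sched" only once all of its in-set uses have been processed (post order).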
3449 void PhaseIdealLoop::scheduled_nodelist( IdealLoopTree *loop, VectorSet& member, Node_List &sched ) { 3450 3451 assert(member.test(loop->_head->_idx), "loop head must be in member set"); 3452 VectorSet visited; 3453 Node_Stack nstack(loop->_body.size()); 3454 3455 Node* n = loop->_head; // top of stack is cached in "n" 3456 uint idx = 0; 3457 visited.set(n->_idx); 3458 3459 // Initially push all with no inputs from within member set 3460 for(uint i = 0; i < loop->_body.size(); i++ ) { 3461 Node *elt = loop->_body.at(i); 3462 if (member.test(elt->_idx)) { 3463 bool found = false; 3464 for (uint j = 0; j < elt->req(); j++) { 3465 Node* def = elt->in(j); 3466 if (def && member.test(def->_idx) && def != elt) { 3467 found = true; 3468 break; 3469 } 3470 } 3471 if (!found && elt != loop->_head) { 3472 nstack.push(n, idx); 3473 n = elt; 3474 assert(!visited.test(n->_idx), "not seen yet"); 3475 visited.set(n->_idx); 3476 } 3477 } 3478 } 3479 3480 // traverse out's that are in the member set 3481 while (true) { 3482 if (idx < n->outcnt()) { 3483 Node* use = n->raw_out(idx); 3484 idx++; 3485 if (!visited.test_set(use->_idx)) { 3486 if (member.test(use->_idx)) { 3487 nstack.push(n, idx); 3488 n = use; 3489 idx = 0; 3490 } 3491 } 3492 } else { 3493 // All outputs processed 3494 sched.push(n); 3495 if (nstack.is_empty()) break; 3496 n = nstack.node(); 3497 idx = nstack.index(); 3498 nstack.pop(); 3499 } 3500 } 3501 } 3502 3503 3504 //------------------------------ has_use_in_set ------------------------------------- 3505 // Has a use in the vector set 3506 bool PhaseIdealLoop::has_use_in_set( Node* n, VectorSet& vset ) { 3507 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 3508 Node* use = n->fast_out(j); 3509 if (vset.test(use->_idx)) { 3510 return true; 3511 } 3512 } 3513 return false; 3514 } 3515 3516 3517 //------------------------------ has_use_internal_to_set ------------------------------------- 3518 // Has use internal to the vector set (ie. not in a phi at the loop head) 3519 bool PhaseIdealLoop::has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop ) { 3520 Node* head = loop->_head; 3521 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 3522 Node* use = n->fast_out(j); 3523 if (vset.test(use->_idx) && !(use->is_Phi() && use->in(0) == head)) { 3524 return true; 3525 } 3526 } 3527 return false; 3528 } 3529 3530 3531 //------------------------------ clone_for_use_outside_loop ------------------------------------- 3532 // clone "n" for uses that are outside of loop 3533 int PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) { 3534 int cloned = 0; 3535 assert(worklist.size() == 0, "should be empty"); 3536 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 3537 Node* use = n->fast_out(j); 3538 if( !loop->is_member(get_loop(has_ctrl(use) ? 
get_ctrl(use) : use)) ) { 3539 worklist.push(use); 3540 } 3541 } 3542 3543 if (C->check_node_count(worklist.size() + NodeLimitFudgeFactor, 3544 "Too many clones required in clone_for_use_outside_loop in partial peeling")) { 3545 return -1; 3546 } 3547 3548 while( worklist.size() ) { 3549 Node *use = worklist.pop(); 3550 if (!has_node(use) || use->in(0) == C->top()) continue; 3551 uint j; 3552 for (j = 0; j < use->req(); j++) { 3553 if (use->in(j) == n) break; 3554 } 3555 assert(j < use->req(), "must be there"); 3556 3557 // clone "n" and insert it between the inputs of "n" and the use outside the loop 3558 Node* n_clone = n->clone(); 3559 _igvn.replace_input_of(use, j, n_clone); 3560 cloned++; 3561 Node* use_c; 3562 if (!use->is_Phi()) { 3563 use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0); 3564 } else { 3565 // Use in a phi is considered a use in the associated predecessor block 3566 use_c = use->in(0)->in(j); 3567 } 3568 set_ctrl(n_clone, use_c); 3569 assert(!loop->is_member(get_loop(use_c)), "should be outside loop"); 3570 get_loop(use_c)->_body.push(n_clone); 3571 _igvn.register_new_node_with_optimizer(n_clone); 3572 #ifndef PRODUCT 3573 if (TracePartialPeeling) { 3574 tty->print_cr("loop exit cloning old: %d new: %d newbb: %d", n->_idx, n_clone->_idx, get_ctrl(n_clone)->_idx); 3575 } 3576 #endif 3577 } 3578 return cloned; 3579 } 3580 3581 3582 //------------------------------ clone_for_special_use_inside_loop ------------------------------------- 3583 // clone "n" for special uses that are in the not_peeled region. 3584 // If these def-uses occur in separate blocks, the code generator 3585 // marks the method as not compilable. For example, if a "BoolNode" 3586 // is in a different basic block than the "IfNode" that uses it, then 3587 // the compilation is aborted in the code generator. 
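// For example (illustrative): a Bool "b" may have to stay in the peel region because it has
// uses there, while an If in the not_peeled region also consumes it. After partial peeling
// the two regions end up in different blocks, which the code generator rejects for Bool->If.
// The cloning below gives such uses a separate copy of "b" that is kept in the not_peeled
// region, so each Bool stays in the same block as its If.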
void PhaseIdealLoop::clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
                                                        VectorSet& not_peel, Node_List& sink_list, Node_List& worklist ) {
  if (n->is_Phi() || n->is_Load()) {
    return;
  }
  assert(worklist.size() == 0, "should be empty");
  for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
    Node* use = n->fast_out(j);
    if ( not_peel.test(use->_idx) &&
         (use->is_If() || use->is_CMove() || use->is_Bool()) &&
         use->in(1) == n)  {
      worklist.push(use);
    }
  }
  if (worklist.size() > 0) {
    // clone "n" and insert it between the inputs of "n" and the use
    Node* n_clone = n->clone();
    loop->_body.push(n_clone);
    _igvn.register_new_node_with_optimizer(n_clone);
    set_ctrl(n_clone, get_ctrl(n));
    sink_list.push(n_clone);
    not_peel.set(n_clone->_idx);
#ifndef PRODUCT
    if (TracePartialPeeling) {
      tty->print_cr("special not_peeled cloning old: %d new: %d", n->_idx, n_clone->_idx);
    }
#endif
    while( worklist.size() ) {
      Node *use = worklist.pop();
      _igvn.rehash_node_delayed(use);
      for (uint j = 1; j < use->req(); j++) {
        if (use->in(j) == n) {
          use->set_req(j, n_clone);
        }
      }
    }
  }
}


//------------------------------ insert_phi_for_loop -------------------------------------
// Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp ) {
  Node *phi = PhiNode::make(lp, back_edge_val);
  phi->set_req(LoopNode::EntryControl, lp_entry_val);
  // Use the existing phi if it already exists
  Node *hit = _igvn.hash_find_insert(phi);
  if( hit == nullptr ) {
    _igvn.register_new_node_with_optimizer(phi);
    set_ctrl(phi, lp);
  } else {
    // Remove the new phi from the graph and use the hit
    _igvn.remove_dead_node(phi);
    phi = hit;
  }
  _igvn.replace_input_of(use, idx, phi);
}

#ifdef ASSERT
//------------------------------ is_valid_loop_partition -------------------------------------
// Validate the loop partition sets: peel and not_peel
bool PhaseIdealLoop::is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list,
                                              VectorSet& not_peel ) {
  uint i;
  // Check that peel_list entries are in the peel set
  for (i = 0; i < peel_list.size(); i++) {
    if (!peel.test(peel_list.at(i)->_idx)) {
      return false;
    }
  }
  // Check that loop members are in either the peel set or the not_peel set
  for (i = 0; i < loop->_body.size(); i++ ) {
    Node *def = loop->_body.at(i);
    uint di = def->_idx;
    // Check that peel set elements are in peel_list
    if (peel.test(di)) {
      if (not_peel.test(di)) {
        return false;
      }
      // Must be in peel_list also
      bool found = false;
      for (uint j = 0; j < peel_list.size(); j++) {
        if (peel_list.at(j)->_idx == di) {
          found = true;
          break;
        }
      }
      if (!found) {
        return false;
      }
    } else if (not_peel.test(di)) {
      if (peel.test(di)) {
        return false;
      }
    } else {
      return false;
    }
  }
  return true;
}

//------------------------------ is_valid_clone_loop_exit_use -------------------------------------
// Ensure a use outside of the loop is of the right form
bool PhaseIdealLoop::is_valid_clone_loop_exit_use(
    IdealLoopTree *loop, Node* use, uint exit_idx) {
  Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
  return (use->is_Phi() &&
          use_c->is_Region() && use_c->req() == 3 &&
          (use_c->in(exit_idx)->Opcode() == Op_IfTrue ||
           use_c->in(exit_idx)->Opcode() == Op_IfFalse ||
           use_c->in(exit_idx)->Opcode() == Op_JumpProj) &&
          loop->is_member( get_loop( use_c->in(exit_idx)->in(0) ) ) );
}

//------------------------------ is_valid_clone_loop_form -------------------------------------
// Ensure that all uses outside of the loop are of the right form
bool PhaseIdealLoop::is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
                                               uint orig_exit_idx, uint clone_exit_idx) {
  uint len = peel_list.size();
  for (uint i = 0; i < len; i++) {
    Node *def = peel_list.at(i);

    for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
      Node *use = def->fast_out(j);
      Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
      if (!loop->is_member(get_loop(use_c))) {
        // use is not in the loop, check for correct structure
        if (use->in(0) == def) {
          // Okay
        } else if (!is_valid_clone_loop_exit_use(loop, use, orig_exit_idx)) {
          return false;
        }
      }
    }
  }
  return true;
}
#endif

//------------------------------ partial_peel -------------------------------------
// Partially peel (aka loop rotation) the top portion of a loop (called
// the peel section below) by cloning it and placing one copy just before
// the new loop head and the other copy at the bottom of the new loop.
//
//    before                       after                where it came from
//
//    stmt1                        stmt1
//  loop:                          stmt2                     clone
//    stmt2                        if condA goto exitA       clone
//    if condA goto exitA        new_loop:                   new
//    stmt3                        stmt3                     clone
//    if !condB goto loop          if condB goto exitB       clone
//  exitB:                         stmt2                     orig
//    stmt4                        if !condA goto new_loop   orig
//  exitA:                         goto exitA
//                               exitB:
//                                 stmt4
//                               exitA:
//
// Step 1: find the cut point: an exit test on a probable
//         induction variable.
// Step 2: schedule (with cloning) operations in the peel
//         section that can be executed after the cut into
//         the section that is not peeled. This may need
//         to clone operations into exit blocks. For
//         instance, a reference to A[i] in the not-peel
//         section and a reference to B[i] in an exit block
//         may cause a left-shift of i by 2 to be placed
//         in the peel block. This step will clone the left
//         shift into the exit block and sink the left shift
//         from the peel to the not-peel section.
// Step 3: clone the loop, retarget the control, and insert
//         phis for values that are live across the new loop
//         head. This is very dependent on the graph structure
//         from clone_loop. It creates region nodes for
//         exit control and associated phi nodes for values
//         that flow out of the loop through that exit. The region
//         node is dominated by the clone's control projection.
//         So the clone's peel section is placed before the
//         new loop head, and the clone's not-peel section
//         forms the top part of the new loop. The original
//         peel section forms the tail of the new loop.
// Step 4: update the dominator tree and recompute the
//         dominator depth.
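//
// As a source-level sketch (illustrative, using condA/condB from the diagram above), the
// "before" shape corresponds to:
//
//   stmt1;
//   for (;;) {
//     stmt2;
//     if (condA) goto exitA;   // cut point: exit test on the probable iv
//     stmt3;
//     if (condB) break;        // exitB
//   }
//   stmt4;                     // exitB:
//   exitA: ...
//
// and partial peeling rotates it so that the condA test ends up at the bottom of the new
// loop, with one peeled copy of stmt2 and condA executed before entering it.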
3771 // 3772 // orig 3773 // 3774 // stmt1 3775 // | 3776 // v 3777 // predicates 3778 // | 3779 // v 3780 // loop<----+ 3781 // | | 3782 // stmt2 | 3783 // | | 3784 // v | 3785 // ifA | 3786 // / | | 3787 // v v | 3788 // false true ^ <-- last_peel 3789 // / | | 3790 // / ===|==cut | 3791 // / stmt3 | <-- first_not_peel 3792 // / | | 3793 // | v | 3794 // v ifB | 3795 // exitA: / \ | 3796 // / \ | 3797 // v v | 3798 // false true | 3799 // / \ | 3800 // / ----+ 3801 // | 3802 // v 3803 // exitB: 3804 // stmt4 3805 // 3806 // 3807 // after clone loop 3808 // 3809 // stmt1 3810 // | 3811 // v 3812 // predicates 3813 // / \ 3814 // clone / \ orig 3815 // / \ 3816 // / \ 3817 // v v 3818 // +---->loop loop<----+ 3819 // | | | | 3820 // | stmt2 stmt2 | 3821 // | | | | 3822 // | v v | 3823 // | ifA ifA | 3824 // | | \ / | | 3825 // | v v v v | 3826 // ^ true false false true ^ <-- last_peel 3827 // | | ^ \ / | | 3828 // | cut==|== \ \ / ===|==cut | 3829 // | stmt3 \ \ / stmt3 | <-- first_not_peel 3830 // | | dom | | | | 3831 // | v \ 1v v2 v | 3832 // | ifB regionA ifB | 3833 // | / \ | / \ | 3834 // | / \ v / \ | 3835 // | v v exitA: v v | 3836 // | true false false true | 3837 // | / ^ \ / \ | 3838 // +---- \ \ / ----+ 3839 // dom \ / 3840 // \ 1v v2 3841 // regionB 3842 // | 3843 // v 3844 // exitB: 3845 // stmt4 3846 // 3847 // 3848 // after partial peel 3849 // 3850 // stmt1 3851 // | 3852 // v 3853 // predicates 3854 // / 3855 // clone / orig 3856 // / TOP 3857 // / \ 3858 // v v 3859 // TOP->loop loop----+ 3860 // | | | 3861 // stmt2 stmt2 | 3862 // | | | 3863 // v v | 3864 // ifA ifA | 3865 // | \ / | | 3866 // v v v v | 3867 // true false false true | <-- last_peel 3868 // | ^ \ / +------|---+ 3869 // +->newloop \ \ / === ==cut | | 3870 // | stmt3 \ \ / TOP | | 3871 // | | dom | | stmt3 | | <-- first_not_peel 3872 // | v \ 1v v2 v | | 3873 // | ifB regionA ifB ^ v 3874 // | / \ | / \ | | 3875 // | / \ v / \ | | 3876 // | v v exitA: v v | | 3877 // | true false false true | | 3878 // | / ^ \ / \ | | 3879 // | | \ \ / v | | 3880 // | | dom \ / TOP | | 3881 // | | \ 1v v2 | | 3882 // ^ v regionB | | 3883 // | | | | | 3884 // | | v ^ v 3885 // | | exitB: | | 3886 // | | stmt4 | | 3887 // | +------------>-----------------+ | 3888 // | | 3889 // +-----------------<---------------------+ 3890 // 3891 // 3892 // final graph 3893 // 3894 // stmt1 3895 // | 3896 // v 3897 // predicates 3898 // | 3899 // v 3900 // stmt2 clone 3901 // | 3902 // v 3903 // ........> ifA clone 3904 // : / | 3905 // dom / | 3906 // : v v 3907 // : false true 3908 // : | | 3909 // : | v 3910 // : | newloop<-----+ 3911 // : | | | 3912 // : | stmt3 clone | 3913 // : | | | 3914 // : | v | 3915 // : | ifB | 3916 // : | / \ | 3917 // : | v v | 3918 // : | false true | 3919 // : | | | | 3920 // : | v stmt2 | 3921 // : | exitB: | | 3922 // : | stmt4 v | 3923 // : | ifA orig | 3924 // : | / \ | 3925 // : | / \ | 3926 // : | v v | 3927 // : | false true | 3928 // : | / \ | 3929 // : v v -----+ 3930 // RegionA 3931 // | 3932 // v 3933 // exitA 3934 // 3935 bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) { 3936 3937 assert(!loop->_head->is_CountedLoop(), "Non-counted loop only"); 3938 if (!loop->_head->is_Loop()) { 3939 return false; 3940 } 3941 LoopNode *head = loop->_head->as_Loop(); 3942 3943 if (head->is_partial_peel_loop() || head->partial_peel_has_failed()) { 3944 return false; 3945 } 3946 3947 // Check for complex exit control 3948 for (uint ii = 0; ii < loop->_body.size(); ii++) { 3949 Node *n 
= loop->_body.at(ii); 3950 int opc = n->Opcode(); 3951 if (n->is_Call() || 3952 opc == Op_Catch || 3953 opc == Op_CatchProj || 3954 opc == Op_Jump || 3955 opc == Op_JumpProj) { 3956 #ifndef PRODUCT 3957 if (TracePartialPeeling) { 3958 tty->print_cr("\nExit control too complex: lp: %d", head->_idx); 3959 } 3960 #endif 3961 return false; 3962 } 3963 } 3964 3965 int dd = dom_depth(head); 3966 3967 // Step 1: find cut point 3968 3969 // Walk up dominators to loop head looking for first loop exit 3970 // which is executed on every path thru loop. 3971 IfNode *peel_if = nullptr; 3972 IfNode *peel_if_cmpu = nullptr; 3973 3974 Node *iff = loop->tail(); 3975 while (iff != head) { 3976 if (iff->is_If()) { 3977 Node *ctrl = get_ctrl(iff->in(1)); 3978 if (ctrl->is_top()) return false; // Dead test on live IF. 3979 // If loop-varying exit-test, check for induction variable 3980 if (loop->is_member(get_loop(ctrl)) && 3981 loop->is_loop_exit(iff) && 3982 is_possible_iv_test(iff)) { 3983 Node* cmp = iff->in(1)->in(1); 3984 if (cmp->Opcode() == Op_CmpI) { 3985 peel_if = iff->as_If(); 3986 } else { 3987 assert(cmp->Opcode() == Op_CmpU, "must be CmpI or CmpU"); 3988 peel_if_cmpu = iff->as_If(); 3989 } 3990 } 3991 } 3992 iff = idom(iff); 3993 } 3994 3995 // Prefer signed compare over unsigned compare. 3996 IfNode* new_peel_if = nullptr; 3997 if (peel_if == nullptr) { 3998 if (!PartialPeelAtUnsignedTests || peel_if_cmpu == nullptr) { 3999 return false; // No peel point found 4000 } 4001 new_peel_if = insert_cmpi_loop_exit(peel_if_cmpu, loop); 4002 if (new_peel_if == nullptr) { 4003 return false; // No peel point found 4004 } 4005 peel_if = new_peel_if; 4006 } 4007 Node* last_peel = stay_in_loop(peel_if, loop); 4008 Node* first_not_peeled = stay_in_loop(last_peel, loop); 4009 if (first_not_peeled == nullptr || first_not_peeled == head) { 4010 return false; 4011 } 4012 4013 #ifndef PRODUCT 4014 if (TraceLoopOpts) { 4015 tty->print("PartialPeel "); 4016 loop->dump_head(); 4017 } 4018 4019 if (TracePartialPeeling) { 4020 tty->print_cr("before partial peel one iteration"); 4021 Node_List wl; 4022 Node* t = head->in(2); 4023 while (true) { 4024 wl.push(t); 4025 if (t == head) break; 4026 t = idom(t); 4027 } 4028 while (wl.size() > 0) { 4029 Node* tt = wl.pop(); 4030 tt->dump(); 4031 if (tt == last_peel) tty->print_cr("-- cut --"); 4032 } 4033 } 4034 #endif 4035 4036 C->print_method(PHASE_BEFORE_PARTIAL_PEELING, 4, head); 4037 4038 VectorSet peel; 4039 VectorSet not_peel; 4040 Node_List peel_list; 4041 Node_List worklist; 4042 Node_List sink_list; 4043 4044 uint estimate = loop->est_loop_clone_sz(1); 4045 if (exceeding_node_budget(estimate)) { 4046 return false; 4047 } 4048 4049 // Set of cfg nodes to peel are those that are executable from 4050 // the head through last_peel. 4051 assert(worklist.size() == 0, "should be empty"); 4052 worklist.push(head); 4053 peel.set(head->_idx); 4054 while (worklist.size() > 0) { 4055 Node *n = worklist.pop(); 4056 if (n != last_peel) { 4057 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 4058 Node* use = n->fast_out(j); 4059 if (use->is_CFG() && 4060 loop->is_member(get_loop(use)) && 4061 !peel.test_set(use->_idx)) { 4062 worklist.push(use); 4063 } 4064 } 4065 } 4066 } 4067 4068 // Set of non-cfg nodes to peel are those that are control 4069 // dependent on the cfg nodes. 4070 for (uint i = 0; i < loop->_body.size(); i++) { 4071 Node *n = loop->_body.at(i); 4072 Node *n_c = has_ctrl(n) ? 
get_ctrl(n) : n;
    if (peel.test(n_c->_idx)) {
      peel.set(n->_idx);
    } else {
      not_peel.set(n->_idx);
    }
  }

  // Step 2: move operations from the peeled section down into the
  //         not-peeled section

  // Get a post order schedule of nodes in the peel region
  // Result in right-most operand.
  scheduled_nodelist(loop, peel, peel_list);

  assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");

  // For a future check for too many new phis
  uint old_phi_cnt = 0;
  for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
    Node* use = head->fast_out(j);
    if (use->is_Phi()) old_phi_cnt++;
  }

#ifndef PRODUCT
  if (TracePartialPeeling) {
    tty->print_cr("\npeeled list");
  }
#endif

  // Evacuate nodes in the peel region into the not_peeled region if possible
  bool too_many_clones = false;
  uint new_phi_cnt = 0;
  uint cloned_for_outside_use = 0;
  for (uint i = 0; i < peel_list.size();) {
    Node* n = peel_list.at(i);
#ifndef PRODUCT
    if (TracePartialPeeling) n->dump();
#endif
    bool incr = true;
    if (!n->is_CFG()) {
      if (has_use_in_set(n, not_peel)) {
        // If "n" is not used internally in the peeled region,
        // move it from the peeled to the not_peeled region.
        if (!has_use_internal_to_set(n, peel, loop)) {
          // if not pinned and not a load (which may be anti-dependent on a store)
          // and not a CMove (Matcher expects only bool->cmove).
          if (n->in(0) == nullptr && !n->is_Load() && !n->is_CMove()) {
            int new_clones = clone_for_use_outside_loop(loop, n, worklist);
            if (C->failing()) return false;
            if (new_clones == -1) {
              too_many_clones = true;
              break;
            }
            cloned_for_outside_use += new_clones;
            sink_list.push(n);
            peel.remove(n->_idx);
            not_peel.set(n->_idx);
            peel_list.remove(i);
            incr = false;
#ifndef PRODUCT
            if (TracePartialPeeling) {
              tty->print_cr("sink to not_peeled region: %d newbb: %d",
                            n->_idx, get_ctrl(n)->_idx);
            }
#endif
          }
        } else {
          // Otherwise check for special def-use cases that span
          // the peel/not_peel boundary such as bool->if
          clone_for_special_use_inside_loop(loop, n, not_peel, sink_list, worklist);
          new_phi_cnt++;
        }
      }
    }
    if (incr) i++;
  }

  estimate += cloned_for_outside_use + new_phi_cnt;
  bool exceed_node_budget = !may_require_nodes(estimate);
  bool exceed_phi_limit = new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta;

  if (too_many_clones || exceed_node_budget || exceed_phi_limit) {
#ifndef PRODUCT
    if (TracePartialPeeling && exceed_phi_limit) {
      tty->print_cr("\nToo many new phis: %d  old %d  new cmpi: %c",
                    new_phi_cnt, old_phi_cnt, new_peel_if != nullptr ? 'T' : 'F');
    }
#endif
    if (new_peel_if != nullptr) {
      remove_cmpi_loop_exit(new_peel_if, loop);
    }
    // Inhibit more partial peeling on this loop
    assert(!head->is_partial_peel_loop(), "not partial peeled");
    head->mark_partial_peel_failed();
    if (cloned_for_outside_use > 0) {
      // Terminate this round of loop opts because
      // the graph outside this loop was changed.
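      // (The clones made by clone_for_use_outside_loop() above are kept, so even though
      // the peeling attempt itself is abandoned, the caller must treat this as progress.)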
      C->set_major_progress();
      return true;
    }
    return false;
  }

  // Step 3: clone the loop, retarget control, and insert new phis

  // Create a new loop head for the new phis and to hang
  // the nodes being moved (sunk) from the peel region.
  LoopNode* new_head = new LoopNode(last_peel, last_peel);
  new_head->set_unswitch_count(head->unswitch_count()); // Preserve
  _igvn.register_new_node_with_optimizer(new_head);
  assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled");
  _igvn.replace_input_of(first_not_peeled, 0, new_head);
  set_loop(new_head, loop);
  loop->_body.push(new_head);
  not_peel.set(new_head->_idx);
  set_idom(new_head, last_peel, dom_depth(first_not_peeled));
  set_idom(first_not_peeled, new_head, dom_depth(first_not_peeled));

  while (sink_list.size() > 0) {
    Node* n = sink_list.pop();
    set_ctrl(n, new_head);
  }

  assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");

  clone_loop(loop, old_new, dd, IgnoreStripMined);

  const uint clone_exit_idx = 1;
  const uint orig_exit_idx  = 2;
  assert(is_valid_clone_loop_form(loop, peel_list, orig_exit_idx, clone_exit_idx), "bad clone loop");

  Node* head_clone = old_new[head->_idx];
  LoopNode* new_head_clone = old_new[new_head->_idx]->as_Loop();
  Node* orig_tail_clone = head_clone->in(2);

  // Add a phi if the "def" node is in the peel set and the "use" is not

  for (uint i = 0; i < peel_list.size(); i++) {
    Node *def = peel_list.at(i);
    if (!def->is_CFG()) {
      for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
        Node *use = def->fast_out(j);
        if (has_node(use) && use->in(0) != C->top() &&
            (!peel.test(use->_idx) ||
             (use->is_Phi() && use->in(0) == head)) ) {
          worklist.push(use);
        }
      }
      while( worklist.size() ) {
        Node *use = worklist.pop();
        for (uint j = 1; j < use->req(); j++) {
          Node* n = use->in(j);
          if (n == def) {

            // "def" is in the peel set, and "use" is not in the peel set
            // or "use" is in the entry boundary (a phi) of the peel set

            Node* use_c = has_ctrl(use) ? get_ctrl(use) : use;

            if ( loop->is_member(get_loop( use_c )) ) {
              // use is in loop
              if (old_new[use->_idx] != nullptr) { // null for dead code
                Node* use_clone = old_new[use->_idx];
                _igvn.replace_input_of(use, j, C->top());
                insert_phi_for_loop( use_clone, j, old_new[def->_idx], def, new_head_clone );
              }
            } else {
              assert(is_valid_clone_loop_exit_use(loop, use, orig_exit_idx), "clone loop format");
              // use is not in the loop, check if the live range includes the cut
              Node* lp_if = use_c->in(orig_exit_idx)->in(0);
              if (not_peel.test(lp_if->_idx)) {
                assert(j == orig_exit_idx, "use from original loop");
                insert_phi_for_loop( use, clone_exit_idx, old_new[def->_idx], def, new_head_clone );
              }
            }
          }
        }
      }
    }
  }

  // Step 3b: retarget control

  // Redirect control to the new loop head if a cloned node in
  // the not_peeled region has control that points into the peeled region.
  // This is necessary because the cloned peeled region will be outside
  // the loop.
4260 // from to 4261 // cloned-peeled <---+ 4262 // new_head_clone: | <--+ 4263 // cloned-not_peeled in(0) in(0) 4264 // orig-peeled 4265 4266 for (uint i = 0; i < loop->_body.size(); i++) { 4267 Node *n = loop->_body.at(i); 4268 if (!n->is_CFG() && n->in(0) != nullptr && 4269 not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) { 4270 Node* n_clone = old_new[n->_idx]; 4271 if (n_clone->depends_only_on_test()) { 4272 // Pin array access nodes: control is updated here to the loop head. If, after some transformations, the 4273 // backedge is removed, an array load could become dependent on a condition that's not a range check for that 4274 // access. If that condition is replaced by an identical dominating one, then an unpinned load would risk 4275 // floating above its range check. 4276 Node* pinned_clone = n_clone->pin_array_access_node(); 4277 if (pinned_clone != nullptr) { 4278 register_new_node_with_ctrl_of(pinned_clone, n_clone); 4279 old_new.map(n->_idx, pinned_clone); 4280 _igvn.replace_node(n_clone, pinned_clone); 4281 n_clone = pinned_clone; 4282 } 4283 } 4284 _igvn.replace_input_of(n_clone, 0, new_head_clone); 4285 } 4286 } 4287 4288 // Backedge of the surviving new_head (the clone) is original last_peel 4289 _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel); 4290 4291 // Cut first node in original not_peel set 4292 _igvn.rehash_node_delayed(new_head); // Multiple edge updates: 4293 new_head->set_req(LoopNode::EntryControl, C->top()); // use rehash_node_delayed / set_req instead of 4294 new_head->set_req(LoopNode::LoopBackControl, C->top()); // multiple replace_input_of calls 4295 4296 // Copy head_clone back-branch info to original head 4297 // and remove original head's loop entry and 4298 // clone head's back-branch 4299 _igvn.rehash_node_delayed(head); // Multiple edge updates 4300 head->set_req(LoopNode::EntryControl, head_clone->in(LoopNode::LoopBackControl)); 4301 head->set_req(LoopNode::LoopBackControl, C->top()); 4302 _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top()); 4303 4304 // Similarly modify the phis 4305 for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) { 4306 Node* use = head->fast_out(k); 4307 if (use->is_Phi() && use->outcnt() > 0) { 4308 Node* use_clone = old_new[use->_idx]; 4309 _igvn.rehash_node_delayed(use); // Multiple edge updates 4310 use->set_req(LoopNode::EntryControl, use_clone->in(LoopNode::LoopBackControl)); 4311 use->set_req(LoopNode::LoopBackControl, C->top()); 4312 _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top()); 4313 } 4314 } 4315 4316 // Step 4: update dominator tree and dominator depth 4317 4318 set_idom(head, orig_tail_clone, dd); 4319 recompute_dom_depth(); 4320 4321 // Inhibit more partial peeling on this loop 4322 new_head_clone->set_partial_peel_loop(); 4323 C->set_major_progress(); 4324 loop->record_for_igvn(); 4325 4326 #ifndef PRODUCT 4327 if (TracePartialPeeling) { 4328 tty->print_cr("\nafter partial peel one iteration"); 4329 Node_List wl; 4330 Node* t = last_peel; 4331 while (true) { 4332 wl.push(t); 4333 if (t == head_clone) break; 4334 t = idom(t); 4335 } 4336 while (wl.size() > 0) { 4337 Node* tt = wl.pop(); 4338 if (tt == head) tty->print_cr("orig head"); 4339 else if (tt == new_head_clone) tty->print_cr("new head"); 4340 else if (tt == head_clone) tty->print_cr("clone head"); 4341 tt->dump(); 4342 } 4343 } 4344 #endif 4345 4346 C->print_method(PHASE_AFTER_PARTIAL_PEELING, 4, new_head_clone); 4347 4348 return true; 4349 } 4350 4351 // 
Transform: 4352 // 4353 // loop<-----------------+ 4354 // | | 4355 // stmt1 stmt2 .. stmtn | 4356 // | | | | 4357 // \ | / | 4358 // v v v | 4359 // region | 4360 // | | 4361 // shared_stmt | 4362 // | | 4363 // v | 4364 // if | 4365 // / \ | 4366 // | -----------+ 4367 // v 4368 // 4369 // into: 4370 // 4371 // loop<-------------------+ 4372 // | | 4373 // v | 4374 // +->loop | 4375 // | | | 4376 // | stmt1 stmt2 .. stmtn | 4377 // | | | | | 4378 // | | \ / | 4379 // | | v v | 4380 // | | region1 | 4381 // | | | | 4382 // | shared_stmt shared_stmt | 4383 // | | | | 4384 // | v v | 4385 // | if if | 4386 // | /\ / \ | 4387 // +-- | | -------+ 4388 // \ / 4389 // v v 4390 // region2 4391 // 4392 // (region2 is shown to merge mirrored projections of the loop exit 4393 // ifs to make the diagram clearer but they really merge the same 4394 // projection) 4395 // 4396 // Conditions for this transformation to trigger: 4397 // - the path through stmt1 is frequent enough 4398 // - the inner loop will be turned into a counted loop after transformation 4399 bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old_new) { 4400 if (!DuplicateBackedge) { 4401 return false; 4402 } 4403 assert(!loop->_head->is_CountedLoop() || StressDuplicateBackedge, "Non-counted loop only"); 4404 if (!loop->_head->is_Loop()) { 4405 return false; 4406 } 4407 4408 uint estimate = loop->est_loop_clone_sz(1); 4409 if (exceeding_node_budget(estimate)) { 4410 return false; 4411 } 4412 4413 LoopNode *head = loop->_head->as_Loop(); 4414 4415 Node* region = nullptr; 4416 IfNode* exit_test = nullptr; 4417 uint inner; 4418 float f; 4419 if (StressDuplicateBackedge) { 4420 if (head->is_strip_mined()) { 4421 return false; 4422 } 4423 Node* c = head->in(LoopNode::LoopBackControl); 4424 4425 while (c != head) { 4426 if (c->is_Region()) { 4427 region = c; 4428 } 4429 c = idom(c); 4430 } 4431 4432 if (region == nullptr) { 4433 return false; 4434 } 4435 4436 inner = 1; 4437 } else { 4438 // Is the shape of the loop that of a counted loop... 4439 Node* back_control = loop_exit_control(head, loop); 4440 if (back_control == nullptr) { 4441 return false; 4442 } 4443 4444 BoolTest::mask bt = BoolTest::illegal; 4445 float cl_prob = 0; 4446 Node* incr = nullptr; 4447 Node* limit = nullptr; 4448 Node* cmp = loop_exit_test(back_control, loop, incr, limit, bt, cl_prob); 4449 if (cmp == nullptr || cmp->Opcode() != Op_CmpI) { 4450 return false; 4451 } 4452 4453 // With an extra phi for the candidate iv? 
    // Or the region node is the loop head
    if (!incr->is_Phi() || incr->in(0) == head) {
      return false;
    }

    PathFrequency pf(head, this);
    region = incr->in(0);

    // Go over all paths for the extra phi's region and see if that
    // path is frequent enough and would match the expected iv shape
    // if the extra phi is removed
    inner = 0;
    for (uint i = 1; i < incr->req(); ++i) {
      Node* in = incr->in(i);
      Node* trunc1 = nullptr;
      Node* trunc2 = nullptr;
      const TypeInteger* iv_trunc_t = nullptr;
      Node* orig_in = in;
      if (!(in = CountedLoopNode::match_incr_with_optional_truncation(in, &trunc1, &trunc2, &iv_trunc_t, T_INT))) {
        continue;
      }
      assert(in->Opcode() == Op_AddI, "wrong increment code");
      Node* xphi = nullptr;
      Node* stride = loop_iv_stride(in, loop, xphi);

      if (stride == nullptr) {
        continue;
      }

      PhiNode* phi = loop_iv_phi(xphi, nullptr, head, loop);
      if (phi == nullptr ||
          (trunc1 == nullptr && phi->in(LoopNode::LoopBackControl) != incr) ||
          (trunc1 != nullptr && phi->in(LoopNode::LoopBackControl) != trunc1)) {
        return false;
      }

      f = pf.to(region->in(i));
      if (f > 0.5) {
        inner = i;
        break;
      }
    }

    if (inner == 0) {
      return false;
    }

    exit_test = back_control->in(0)->as_If();
  }

  if (idom(region)->is_Catch()) {
    return false;
  }

  // Collect all control nodes that need to be cloned (shared_stmt in the diagram)
  Unique_Node_List wq;
  wq.push(head->in(LoopNode::LoopBackControl));
  for (uint i = 0; i < wq.size(); i++) {
    Node* c = wq.at(i);
    assert(get_loop(c) == loop, "not in the right loop?");
    if (c->is_Region()) {
      if (c != region) {
        for (uint j = 1; j < c->req(); ++j) {
          wq.push(c->in(j));
        }
      }
    } else {
      wq.push(c->in(0));
    }
    assert(!is_strict_dominator(c, region), "shouldn't go above region");
  }

  Node* region_dom = idom(region);

  // Can't do the transformation if this would cause a membar pair to
  // be split
  for (uint i = 0; i < wq.size(); i++) {
    Node* c = wq.at(i);
    if (c->is_MemBar() && (c->as_MemBar()->trailing_store() || c->as_MemBar()->trailing_load_store())) {
      assert(c->as_MemBar()->leading_membar()->trailing_membar() == c, "bad membar pair");
      if (!wq.member(c->as_MemBar()->leading_membar())) {
        return false;
      }
    }
  }

  // Collect data nodes that need to be cloned as well
  int dd = dom_depth(head);

  for (uint i = 0; i < loop->_body.size(); ++i) {
    Node* n = loop->_body.at(i);
    if (has_ctrl(n)) {
      Node* c = get_ctrl(n);
      if (wq.member(c)) {
        wq.push(n);
      }
    } else {
      set_idom(n, idom(n), dd);
    }
  }

  // clone shared_stmt
  clone_loop_body(wq, old_new, nullptr);

  Node* region_clone = old_new[region->_idx];
  region_clone->set_req(inner, C->top());
  set_idom(region, region->in(inner), dd);

  // Prepare the outer loop
  Node* outer_head = new LoopNode(head->in(LoopNode::EntryControl), old_new[head->in(LoopNode::LoopBackControl)->_idx]);
  register_control(outer_head, loop->_parent, outer_head->in(LoopNode::EntryControl));
  _igvn.replace_input_of(head, LoopNode::EntryControl, outer_head);
  set_idom(head, outer_head, dd);

  fix_body_edges(wq, loop, old_new, dd, loop->_parent, true);

  // Make one of the
shared_stmt copies only reachable from stmt1, the 4571 // other only from stmt2..stmtn. 4572 Node* dom = nullptr; 4573 for (uint i = 1; i < region->req(); ++i) { 4574 if (i != inner) { 4575 _igvn.replace_input_of(region, i, C->top()); 4576 } 4577 Node* in = region_clone->in(i); 4578 if (in->is_top()) { 4579 continue; 4580 } 4581 if (dom == nullptr) { 4582 dom = in; 4583 } else { 4584 dom = dom_lca(dom, in); 4585 } 4586 } 4587 4588 set_idom(region_clone, dom, dd); 4589 4590 // Set up the outer loop 4591 for (uint i = 0; i < head->outcnt(); i++) { 4592 Node* u = head->raw_out(i); 4593 if (u->is_Phi()) { 4594 Node* outer_phi = u->clone(); 4595 outer_phi->set_req(0, outer_head); 4596 Node* backedge = old_new[u->in(LoopNode::LoopBackControl)->_idx]; 4597 if (backedge == nullptr) { 4598 backedge = u->in(LoopNode::LoopBackControl); 4599 } 4600 outer_phi->set_req(LoopNode::LoopBackControl, backedge); 4601 register_new_node(outer_phi, outer_head); 4602 _igvn.replace_input_of(u, LoopNode::EntryControl, outer_phi); 4603 } 4604 } 4605 4606 // create control and data nodes for out of loop uses (including region2) 4607 Node_List worklist; 4608 uint new_counter = C->unique(); 4609 fix_ctrl_uses(wq, loop, old_new, ControlAroundStripMined, outer_head, nullptr, worklist); 4610 4611 Node_List *split_if_set = nullptr; 4612 Node_List *split_bool_set = nullptr; 4613 Node_List *split_cex_set = nullptr; 4614 fix_data_uses(wq, loop, ControlAroundStripMined, loop->skip_strip_mined(), new_counter, old_new, worklist, 4615 split_if_set, split_bool_set, split_cex_set); 4616 4617 finish_clone_loop(split_if_set, split_bool_set, split_cex_set); 4618 4619 if (exit_test != nullptr) { 4620 float cnt = exit_test->_fcnt; 4621 if (cnt != COUNT_UNKNOWN) { 4622 exit_test->_fcnt = cnt * f; 4623 old_new[exit_test->_idx]->as_If()->_fcnt = cnt * (1 - f); 4624 } 4625 } 4626 4627 C->set_major_progress(); 4628 4629 return true; 4630 } 4631 4632 // AutoVectorize the loop: replace scalar ops with vector ops. 4633 PhaseIdealLoop::AutoVectorizeStatus 4634 PhaseIdealLoop::auto_vectorize(IdealLoopTree* lpt, VSharedData &vshared) { 4635 // Counted loop only 4636 if (!lpt->is_counted()) { 4637 return AutoVectorizeStatus::Impossible; 4638 } 4639 4640 // Main-loop only 4641 CountedLoopNode* cl = lpt->_head->as_CountedLoop(); 4642 if (!cl->is_main_loop()) { 4643 return AutoVectorizeStatus::Impossible; 4644 } 4645 4646 VLoop vloop(lpt, false); 4647 if (!vloop.check_preconditions()) { 4648 return AutoVectorizeStatus::TriedAndFailed; 4649 } 4650 4651 // Ensure the shared data is cleared before each use 4652 vshared.clear(); 4653 4654 const VLoopAnalyzer vloop_analyzer(vloop, vshared); 4655 if (!vloop_analyzer.success()) { 4656 return AutoVectorizeStatus::TriedAndFailed; 4657 } 4658 4659 SuperWord sw(vloop_analyzer); 4660 if (!sw.transform_loop()) { 4661 return AutoVectorizeStatus::TriedAndFailed; 4662 } 4663 4664 return AutoVectorizeStatus::Success; 4665 } 4666 4667 // Just before insert_pre_post_loops, we can multiversion the loop: 4668 // 4669 // multiversion_if 4670 // | | 4671 // fast_loop slow_loop 4672 // 4673 // In the fast_loop we can make speculative assumptions, and put the 4674 // conditions into the multiversion_if. If the conditions hold at runtime, 4675 // we enter the fast_loop, if the conditions fail, we take the slow_loop 4676 // instead which does not make any of the speculative assumptions. 4677 // 4678 // Note: we only multiversion the loop if the loop does not have any 4679 // auto vectorization check Predicate. 
If we have that predicate, 4680 // then we can simply add the speculative assumption checks to 4681 // that Predicate. This means we do not need to duplicate the 4682 // loop - we have a smaller graph and save compile time. Should 4683 // the conditions ever fail, then we deopt / trap at the Predicate 4684 // and recompile without that Predicate. At that point we will 4685 // multiversion the loop, so that we can still have speculative 4686 // runtime checks. 4687 // 4688 // We perform the multiversioning when the loop is still in its single 4689 // iteration form, even before we insert pre and post loops. This makes 4690 // the cloning much simpler. However, this means that both the fast 4691 // and the slow loop have to be optimized independently (adding pre 4692 // and post loops, unrolling the main loop, auto-vectorize etc.). And 4693 // we may end up not needing any speculative assumptions in the fast_loop 4694 // and then rejecting the slow_loop by constant folding the multiversion_if. 4695 // 4696 // Therefore, we "delay" the optimization of the slow_loop until we add 4697 // at least one speculative assumption for the fast_loop. If we never 4698 // add such a speculative runtime check, the OpaqueMultiversioningNode 4699 // of the multiversion_if constant folds to true after loop opts, and the 4700 // multiversion_if folds away the "delayed" slow_loop. If we add any 4701 // speculative assumption, then we notify the OpaqueMultiversioningNode 4702 // with "notify_slow_loop_that_it_can_resume_optimizations". 4703 // 4704 // Note: new runtime checks can be added to the multiversion_if with 4705 // PhaseIdealLoop::create_new_if_for_multiversion 4706 void PhaseIdealLoop::maybe_multiversion_for_auto_vectorization_runtime_checks(IdealLoopTree* lpt, Node_List& old_new) { 4707 CountedLoopNode* cl = lpt->_head->as_CountedLoop(); 4708 LoopNode* outer_loop = cl->skip_strip_mined(); 4709 Node* entry = outer_loop->in(LoopNode::EntryControl); 4710 4711 // Check we have multiversioning enabled, and are not already multiversioned. 4712 if (!LoopMultiversioning || cl->is_multiversion()) { return; } 4713 4714 // Check that we do not have a parse-predicate where we can add the runtime checks 4715 // during auto-vectorization. 4716 const Predicates predicates(entry); 4717 const PredicateBlock* predicate_block = predicates.auto_vectorization_check_block(); 4718 if (predicate_block->has_parse_predicate()) { return; } 4719 4720 // Check node budget. 4721 uint estimate = lpt->est_loop_clone_sz(2); 4722 if (!may_require_nodes(estimate)) { return; } 4723 4724 do_multiversioning(lpt, old_new); 4725 } 4726 4727 // Returns true if the Reduction node is unordered. 4728 static bool is_unordered_reduction(Node* n) { 4729 return n->is_Reduction() && !n->as_Reduction()->requires_strict_order(); 4730 } 4731 4732 // Having ReductionNodes in the loop is expensive. They need to recursively 4733 // fold together the vector values, for every vectorized loop iteration. If 4734 // we encounter the following pattern, we can vector accumulate the values 4735 // inside the loop, and only have a single UnorderedReduction after the loop. 4736 // 4737 // Note: UnorderedReduction represents a ReductionNode which does not require 4738 // calculating in strict order. 4739 // 4740 // CountedLoop init 4741 // | | 4742 // +------+ | +-----------------------+ 4743 // | | | | 4744 // PhiNode (s) | 4745 // | | 4746 // | Vector | 4747 // | | | 4748 // UnorderedReduction (first_ur) | 4749 // | | 4750 // ... 
Vector          |
//                  |            |             |
//               UnorderedReduction (last_ur)  |
//                       |                     |
//                       +---------------------+
//
// We patch the graph to look like this:
//
// CountedLoop   identity_vector
//         |         |
//         +-------+ | +---------------+
//                 | | |               |
//                PhiNode (v)          |
//                   |                 |
//                   |         Vector  |
//                   |           |     |
//                  VectorAccumulator  |
//                   |                 |
//                  ...        Vector  |
//                   |           |     |
//          init  VectorAccumulator    |
//            |      |        |        |
//          UnorderedReduction  +------+
//
// We turned the scalar (s) Phi into a vectorized one (v). In the loop, we
// use vector_accumulators, which do the same reductions, but only
// element-wise. This is a single operation per vector_accumulator, rather
// than many for an UnorderedReduction. We can then reduce the last
// vector_accumulator after the loop, and also reduce the init value into it.
//
// We cannot do this with all reductions. Some reductions do not allow the
// reordering of operations (for example float addition/multiplication require
// strict order).
void PhaseIdealLoop::move_unordered_reduction_out_of_loop(IdealLoopTree* loop) {
  assert(!C->major_progress() && loop->is_counted() && loop->is_innermost(), "sanity");

  // Find all Phi nodes with an unordered Reduction on the backedge.
  CountedLoopNode* cl = loop->_head->as_CountedLoop();
  for (DUIterator_Fast jmax, j = cl->fast_outs(jmax); j < jmax; j++) {
    Node* phi = cl->fast_out(j);
    // We have a phi with a single use, and an unordered Reduction on the backedge.
    if (!phi->is_Phi() || phi->outcnt() != 1 || !is_unordered_reduction(phi->in(2))) {
      continue;
    }

    ReductionNode* last_ur = phi->in(2)->as_Reduction();
    assert(!last_ur->requires_strict_order(), "must be");

    // Determine types
    const TypeVect* vec_t = last_ur->vect_type();
    uint vector_length    = vec_t->length();
    BasicType bt          = vec_t->element_basic_type();

    // Convert opcode from vector-reduction -> scalar -> normal-vector-op
    const int sopc = VectorNode::scalar_opcode(last_ur->Opcode(), bt);
    const int vopc = VectorNode::opcode(sopc, bt);
    if (!Matcher::match_rule_supported_vector(vopc, vector_length, bt)) {
      DEBUG_ONLY( last_ur->dump(); )
      assert(false, "do not have normal vector op for this reduction");
      continue; // not implemented -> fails
    }

    // Traverse up the chain of unordered Reductions, checking that it loops back to
    // the phi. Check that all unordered Reductions only have a single use, except for
    // the last (last_ur), which only has phi as a use in the loop, and all other uses
    // are outside the loop.
    ReductionNode* current = last_ur;
    ReductionNode* first_ur = nullptr;
    while (true) {
      assert(!current->requires_strict_order(), "sanity");

      // Expect no ctrl and a vector_input from within the loop.
      Node* ctrl = current->in(0);
      Node* vector_input = current->in(2);
      if (ctrl != nullptr || get_ctrl(vector_input) != cl) {
        DEBUG_ONLY( current->dump(1); )
        assert(false, "reduction has ctrl or bad vector_input");
        break; // Chain traversal fails.
      }

      assert(current->vect_type() != nullptr, "must have vector type");
      if (current->vect_type() != last_ur->vect_type()) {
        // Reductions do not have the same vector type (length and element type).
        break; // Chain traversal fails.
void PhaseIdealLoop::move_unordered_reduction_out_of_loop(IdealLoopTree* loop) {
  assert(!C->major_progress() && loop->is_counted() && loop->is_innermost(), "sanity");

  // Find all Phi nodes with an unordered Reduction on the backedge.
  CountedLoopNode* cl = loop->_head->as_CountedLoop();
  for (DUIterator_Fast jmax, j = cl->fast_outs(jmax); j < jmax; j++) {
    Node* phi = cl->fast_out(j);
    // We are only interested in phis with a single use and an unordered
    // Reduction on the backedge.
    if (!phi->is_Phi() || phi->outcnt() != 1 || !is_unordered_reduction(phi->in(2))) {
      continue;
    }

    ReductionNode* last_ur = phi->in(2)->as_Reduction();
    assert(!last_ur->requires_strict_order(), "must be");

    // Determine types.
    const TypeVect* vec_t = last_ur->vect_type();
    uint vector_length = vec_t->length();
    BasicType bt = vec_t->element_basic_type();

    // Convert opcode from vector-reduction -> scalar -> normal-vector-op.
    const int sopc = VectorNode::scalar_opcode(last_ur->Opcode(), bt);
    const int vopc = VectorNode::opcode(sopc, bt);
    if (!Matcher::match_rule_supported_vector(vopc, vector_length, bt)) {
      DEBUG_ONLY( last_ur->dump(); )
      assert(false, "do not have normal vector op for this reduction");
      continue; // not implemented -> fails
    }

    // Traverse up the chain of unordered Reductions, checking that it loops back to
    // the phi. Check that all unordered Reductions only have a single use, except for
    // the last (last_ur), which only has phi as a use in the loop, and all other uses
    // are outside the loop.
    ReductionNode* current = last_ur;
    ReductionNode* first_ur = nullptr;
    while (true) {
      assert(!current->requires_strict_order(), "sanity");

      // Expect no ctrl and a vector_input from within the loop.
      Node* ctrl = current->in(0);
      Node* vector_input = current->in(2);
      if (ctrl != nullptr || get_ctrl(vector_input) != cl) {
        DEBUG_ONLY( current->dump(1); )
        assert(false, "reduction has ctrl or bad vector_input");
        break; // Chain traversal fails.
      }

      assert(current->vect_type() != nullptr, "must have vector type");
      if (current->vect_type() != last_ur->vect_type()) {
        // Reductions do not have the same vector type (length and element type).
        break; // Chain traversal fails.
      }

      // Expect a single use of an unordered Reduction, except for last_ur.
      if (current == last_ur) {
        // Expect all uses to be outside the loop, except phi.
        for (DUIterator_Fast kmax, k = current->fast_outs(kmax); k < kmax; k++) {
          Node* use = current->fast_out(k);
          if (use != phi && ctrl_or_self(use) == cl) {
            DEBUG_ONLY( current->dump(-1); )
            assert(false, "reduction has use inside loop");
            // Should not be allowed by SuperWord::mark_reductions.
            return; // bail out of optimization
          }
        }
      } else {
        if (current->outcnt() != 1) {
          break; // Chain traversal fails.
        }
      }

      // Expect another unordered Reduction or phi as the scalar input.
      Node* scalar_input = current->in(1);
      if (is_unordered_reduction(scalar_input) &&
          scalar_input->Opcode() == current->Opcode()) {
        // Move up the unordered Reduction chain.
        current = scalar_input->as_Reduction();
        assert(!current->requires_strict_order(), "must be");
      } else if (scalar_input == phi) {
        // Chain terminates at phi.
        first_ur = current;
        current = nullptr;
        break; // Success.
      } else {
        // scalar_input is neither phi nor a matching reduction.
        // It can, for example, be a scalar reduction when we have
        // partial vectorization.
        break; // Chain traversal fails.
      }
    }
    if (current != nullptr) {
      // Chain traversal was not successful.
      continue;
    }
    assert(first_ur != nullptr, "must have successfully terminated chain traversal");

    Node* identity_scalar = ReductionNode::make_identity_con_scalar(_igvn, sopc, bt);
    set_root_as_ctrl(identity_scalar);
    VectorNode* identity_vector = VectorNode::scalar2vector(identity_scalar, vector_length, bt);
    register_new_node(identity_vector, C->root());
    assert(vec_t == identity_vector->vect_type(), "matching vector type");
    VectorNode::trace_new_vector(identity_vector, "Unordered Reduction");

    // Turn the scalar phi into a vector phi.
    _igvn.rehash_node_delayed(phi);
    Node* init = phi->in(1); // Remember init before replacing it.
    phi->set_req_X(1, identity_vector, &_igvn);
    phi->as_Type()->set_type(vec_t);
    _igvn.set_type(phi, vec_t);

    // Traverse down the chain of unordered Reductions, and replace them with vector_accumulators.
    current = first_ur;
    while (true) {
      // Create vector_accumulator to replace current.
      Node* last_vector_accumulator = current->in(1);
      Node* vector_input = current->in(2);
      VectorNode* vector_accumulator = VectorNode::make(vopc, last_vector_accumulator, vector_input, vec_t);
      register_new_node(vector_accumulator, cl);
      _igvn.replace_node(current, vector_accumulator);
      VectorNode::trace_new_vector(vector_accumulator, "Unordered Reduction");
      if (current == last_ur) {
        break;
      }
      current = vector_accumulator->unique_out()->as_Reduction();
      assert(!current->requires_strict_order(), "must be");
    }

    // Create post-loop reduction.
    Node* last_accumulator = phi->in(2);
    Node* post_loop_reduction = ReductionNode::make(sopc, nullptr, init, last_accumulator, bt);

    // Take over uses of last_accumulator that are not in the loop.
    for (DUIterator i = last_accumulator->outs(); last_accumulator->has_out(i); i++) {
      Node* use = last_accumulator->out(i);
      if (use != phi && use != post_loop_reduction) {
        assert(ctrl_or_self(use) != cl, "use must be outside loop");
        use->replace_edge(last_accumulator, post_loop_reduction, &_igvn);
        --i;
      }
    }
    register_new_node(post_loop_reduction, get_late_ctrl(post_loop_reduction, cl));
    VectorNode::trace_new_vector(post_loop_reduction, "Unordered Reduction");

    assert(last_accumulator->outcnt() == 2, "last_accumulator has 2 uses: phi and post_loop_reduction");
    assert(post_loop_reduction->outcnt() > 0, "should have taken over all non-loop uses of last_accumulator");
    assert(phi->outcnt() == 1, "accumulator is the only use of phi");
  }
}

void DataNodeGraph::clone_data_nodes(Node* new_ctrl) {
  for (uint i = 0; i < _data_nodes.size(); i++) {
    clone(_data_nodes[i], new_ctrl);
  }
}

// Clone the given node and register the clone with IGVN. Set 'new_ctrl' as
// ctrl; CastII nodes are additionally pinned at 'new_ctrl'.
void DataNodeGraph::clone(Node* node, Node* new_ctrl) {
  Node* clone = node->clone();
  _phase->igvn().register_new_node_with_optimizer(clone);
  _orig_to_new.put(node, clone);
  _phase->set_ctrl(clone, new_ctrl);
  if (node->is_CastII()) {
    clone->set_req(0, new_ctrl);
  }
}

// Rewire the data inputs of all cloned nodes: the clones still point at the
// same inputs as their corresponding orig nodes, so wherever an input was
// also cloned, rewire the clone to the cloned input. This creates a separate
// cloned graph.
void DataNodeGraph::rewire_clones_to_cloned_inputs() {
  _orig_to_new.iterate_all([&](Node* node, Node* clone) {
    for (uint i = 1; i < node->req(); i++) {
      Node** cloned_input = _orig_to_new.get(node->in(i));
      if (cloned_input != nullptr) {
        // Input was also cloned -> rewire clone to the cloned input.
        _phase->igvn().replace_input_of(clone, i, *cloned_input);
      }
    }
  });
}
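
// A small example of the two-phase cloning (illustrative only, with
// hypothetical nodes A and B where B uses A): clone_data_nodes() first
// produces clones A' and B', where B' still points at the original A. The
// pass above then looks up A in _orig_to_new and rewires B' from A to A',
// so that A' -> B' forms a graph fully separate from A -> B.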

// Clone all non-OpaqueLoop* nodes and apply the provided transformation strategy for OpaqueLoop* nodes.
// Set 'new_ctrl' as ctrl for all cloned non-OpaqueLoop* nodes.
void DataNodeGraph::clone_data_nodes_and_transform_opaque_loop_nodes(
    const TransformStrategyForOpaqueLoopNodes& transform_strategy,
    Node* new_ctrl) {
  for (uint i = 0; i < _data_nodes.size(); i++) {
    Node* data_node = _data_nodes[i];
    if (data_node->is_Opaque1()) {
      transform_opaque_node(transform_strategy, data_node);
    } else {
      clone(data_node, new_ctrl);
    }
  }
}

void DataNodeGraph::transform_opaque_node(const TransformStrategyForOpaqueLoopNodes& transform_strategy, Node* node) {
  Node* transformed_node;
  if (node->is_OpaqueLoopInit()) {
    transformed_node = transform_strategy.transform_opaque_init(node->as_OpaqueLoopInit());
  } else {
    assert(node->is_OpaqueLoopStride(), "must be an OpaqueLoopStrideNode");
    transformed_node = transform_strategy.transform_opaque_stride(node->as_OpaqueLoopStride());
  }
  // Add an orig->new mapping to correctly update the inputs of the copied graph in rewire_clones_to_cloned_inputs().
  _orig_to_new.put(node, transformed_node);
}
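
// A usage sketch for the strategy hook above (hypothetical strategy class;
// it assumes TransformStrategyForOpaqueLoopNodes exposes exactly the two
// virtual const methods called in transform_opaque_node()):
//
//   class KeepOpaqueLoopNodesStrategy : public TransformStrategyForOpaqueLoopNodes {
//    public:
//     Node* transform_opaque_init(OpaqueLoopInitNode* opaque_init) const override {
//       return opaque_init; // keep the OpaqueLoopInit node as-is
//     }
//     Node* transform_opaque_stride(OpaqueLoopStrideNode* opaque_stride) const override {
//       return opaque_stride; // keep the OpaqueLoopStride node as-is
//     }
//   };
//
// With such a strategy, rewire_clones_to_cloned_inputs() would rewire the
// cloned graph to the original OpaqueLoop* nodes, i.e. share them between
// the orig and cloned graphs.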