/*
 * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_LOOPNODE_HPP
#define SHARE_OPTO_LOOPNODE_HPP

#include "opto/cfgnode.hpp"
#include "opto/multnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"

class CmpNode;
class BaseCountedLoopEndNode;
class CountedLoopNode;
class IdealLoopTree;
class LoopNode;
class Node;
class OuterStripMinedLoopEndNode;
class ParsePredicates;
class PathFrequency;
class PhaseIdealLoop;
class CountedLoopReserveKit;
class VectorSet;
class Invariance;
struct small_cache;

//
//            I D E A L I Z E D   L O O P S
//
// Idealized loops are the set of loops I perform more interesting
// transformations on, beyond simple hoisting.

//------------------------------LoopNode---------------------------------------
// Simple loop header. Fall in path on left, loop-back path on right.
class LoopNode : public RegionNode {
  // Size is bigger to hold the flags. However, the flags do not change
  // the semantics, so they do not appear in the hash & cmp functions.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  uint _loop_flags;
  // Names for flag bitfields
  enum { Normal=0, Pre=1, Main=2, Post=3, PreMainPostFlagsMask=3,
         MainHasNoPreLoop      = 1<<2,
         HasExactTripCount     = 1<<3,
         InnerLoop             = 1<<4,
         PartialPeelLoop       = 1<<5,
         PartialPeelFailed     = 1<<6,
         WasSlpAnalyzed        = 1<<7,
         PassedSlpAnalysis     = 1<<8,
         DoUnrollOnly          = 1<<9,
         VectorizedLoop        = 1<<10,
         HasAtomicPostLoop     = 1<<11,
         StripMined            = 1<<12,
         SubwordLoop           = 1<<13,
         ProfileTripFailed     = 1<<14,
         LoopNestInnerLoop     = 1<<15,
         LoopNestLongOuterLoop = 1<<16,
         FlatArrays            = 1<<17};
  char _unswitch_count;
  enum { _unswitch_max=3 };

  // Expected trip count from profile data
  float _profile_trip_cnt;

public:
  // Names for edge indices
  enum { Self=0, EntryControl, LoopBackControl };

  bool is_inner_loop() const { return _loop_flags & InnerLoop; }
  void set_inner_loop() { _loop_flags |= InnerLoop; }

  bool is_vectorized_loop() const { return _loop_flags & VectorizedLoop; }
  bool is_partial_peel_loop() const { return _loop_flags & PartialPeelLoop; }
  void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; }
  bool partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; }
  bool is_strip_mined() const { return _loop_flags & StripMined; }
  bool is_profile_trip_failed() const { return _loop_flags & ProfileTripFailed; }
  bool is_subword_loop() const { return _loop_flags & SubwordLoop; }
  bool is_loop_nest_inner_loop() const { return _loop_flags & LoopNestInnerLoop; }
  bool is_loop_nest_outer_loop() const { return _loop_flags & LoopNestLongOuterLoop; }
  bool is_flat_arrays() const { return _loop_flags & FlatArrays; }

  void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; }
  void mark_was_slp() { _loop_flags |= WasSlpAnalyzed; }
  void mark_passed_slp() { _loop_flags |= PassedSlpAnalysis; }
  void mark_do_unroll_only() { _loop_flags |= DoUnrollOnly; }
  void mark_loop_vectorized() { _loop_flags |= VectorizedLoop; }
  void mark_has_atomic_post_loop() { _loop_flags |= HasAtomicPostLoop; }
  void mark_strip_mined() { _loop_flags |= StripMined; }
  void clear_strip_mined() { _loop_flags &= ~StripMined; }
  void mark_profile_trip_failed() { _loop_flags |= ProfileTripFailed; }
  void mark_subword_loop() { _loop_flags |= SubwordLoop; }
  void mark_loop_nest_inner_loop() { _loop_flags |= LoopNestInnerLoop; }
  void mark_loop_nest_outer_loop() { _loop_flags |= LoopNestLongOuterLoop; }
  void mark_flat_arrays() { _loop_flags |= FlatArrays; }

  int unswitch_max() { return _unswitch_max; }
  int unswitch_count() { return _unswitch_count; }

  void set_unswitch_count(int val) {
    assert(val <= unswitch_max(), "too many unswitches");
    _unswitch_count = val;
  }

  void set_profile_trip_cnt(float ptc) { _profile_trip_cnt = ptc; }
  float profile_trip_cnt() { return _profile_trip_cnt; }

  LoopNode(Node *entry, Node *backedge)
    : RegionNode(3), _loop_flags(0), _unswitch_count(0),
      _profile_trip_cnt(COUNT_UNKNOWN) {
    init_class_id(Class_Loop);
    init_req(EntryControl, entry);
    init_req(LoopBackControl, backedge);
  }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int Opcode() const;
  bool can_be_counted_loop(PhaseValues* phase) const {
    return req() == 3 && in(0) != nullptr &&
      in(1) != nullptr && phase->type(in(1)) != Type::TOP &&
      in(2) != nullptr && phase->type(in(2)) != Type::TOP;
  }
  bool is_valid_counted_loop(BasicType bt) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif

  void verify_strip_mined(int expect_skeleton) const NOT_DEBUG_RETURN;
  virtual LoopNode* skip_strip_mined(int expect_skeleton = 1) { return this; }
  virtual IfTrueNode* outer_loop_tail() const { ShouldNotReachHere(); return nullptr; }
  virtual OuterStripMinedLoopEndNode* outer_loop_end() const { ShouldNotReachHere(); return nullptr; }
  virtual IfFalseNode* outer_loop_exit() const { ShouldNotReachHere(); return nullptr; }
  virtual SafePointNode* outer_safepoint() const { ShouldNotReachHere(); return nullptr; }
};

//------------------------------Counted Loops----------------------------------
// Counted loops are all trip-counted loops, with exactly 1 trip-counter exit
// path (and maybe some other exit paths). The trip-counter exit is always
// last in the loop. The trip-counter has to stride by a constant;
// the exit value is also loop invariant.

// CountedLoopNodes and CountedLoopEndNodes come in matched pairs. The
// CountedLoopNode has the incoming loop control and the loop-back-control
// which is always the IfTrue before the matching CountedLoopEndNode. The
// CountedLoopEndNode has an incoming control (possibly not the
// CountedLoopNode if there is control flow in the loop), the post-increment
// trip-counter value, and the limit. The trip-counter value is always of
// the form (Op old-trip-counter stride). The old-trip-counter is produced
// by a Phi connected to the CountedLoopNode. The stride is constant.
// The Op is any commutable opcode, including Add, Mul, Xor. The
// CountedLoopEndNode also takes in the loop-invariant limit value.

// From a CountedLoopNode I can reach the matching CountedLoopEndNode via the
// loop-back control. From CountedLoopEndNodes I can reach CountedLoopNodes
// via the old-trip-counter from the Op node.

//------------------------------CountedLoopNode--------------------------------
// CountedLoopNodes head simple counted loops. CountedLoopNodes have as
// inputs the incoming loop-start control and the loop-back control, so they
// act like RegionNodes. They also take in the initial trip counter, the
// loop-invariant stride and the loop-invariant limit value. CountedLoopNodes
// produce a loop-body control and the trip counter value. Since
// CountedLoopNodes behave like RegionNodes I still have a standard CFG model.
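//
// For illustration only (this sketch is not part of the matching code): a
// source loop such as
//
//   for (int i = init; i < limit; i += stride) { ... }
//
// is represented roughly as
//
//   phi  = Phi(CountedLoop, init, incr)          // the old trip counter
//   incr = AddI(phi, stride)                     // post-increment value
//   CountedLoopEnd tests Bool(Cmp(incr, limit))  // the trip-counter exit
//   IfTrue(CountedLoopEnd)                       // loop-back control into
//                                                // the CountedLoop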

class BaseCountedLoopNode : public LoopNode {
public:
  BaseCountedLoopNode(Node *entry, Node *backedge)
    : LoopNode(entry, backedge) {
  }

  Node *init_control() const { return in(EntryControl); }
  Node *back_control() const { return in(LoopBackControl); }

  Node* init_trip() const;
  Node* stride() const;
  bool stride_is_con() const;
  Node* limit() const;
  Node* incr() const;
  Node* phi() const;

  BaseCountedLoopEndNode* loopexit_or_null() const;
  BaseCountedLoopEndNode* loopexit() const;

  virtual BasicType bt() const = 0;

  jlong stride_con() const;

  static BaseCountedLoopNode* make(Node* entry, Node* backedge, BasicType bt);
};


class CountedLoopNode : public BaseCountedLoopNode {
  // Size is bigger to hold _main_idx. However, _main_idx does not change
  // the semantics, so it does not appear in the hash & cmp functions.
  virtual uint size_of() const { return sizeof(*this); }

  // For Pre- and Post-loops during debugging ONLY, this holds the index of
  // the Main CountedLoop. Used to assert that we understand the graph shape.
  node_idx_t _main_idx;

  // Known trip count calculated by compute_exact_trip_count()
  uint _trip_count;

  // Log2 of original loop bodies in unrolled loop
  int _unrolled_count_log2;

  // Node count prior to last unrolling - used to decide if
  // unroll,optimize,unroll,optimize,... is making progress
  int _node_count_before_unroll;

  // If slp analysis is performed we record the maximum
  // vector mapped unroll factor here
  int _slp_maximum_unroll_factor;

  // The eventual count of vectorizable packs in slp
  int _slp_vector_pack_count;

public:
  CountedLoopNode(Node *entry, Node *backedge)
    : BaseCountedLoopNode(entry, backedge), _main_idx(0), _trip_count(max_juint),
      _unrolled_count_log2(0), _node_count_before_unroll(0),
      _slp_maximum_unroll_factor(0), _slp_vector_pack_count(0) {
    init_class_id(Class_CountedLoop);
    // Initialize _trip_count to the largest possible value.
    // Will be reset (lower) if the loop's trip count is known.
  }

  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  CountedLoopEndNode* loopexit_or_null() const { return (CountedLoopEndNode*) BaseCountedLoopNode::loopexit_or_null(); }
  CountedLoopEndNode* loopexit() const { return (CountedLoopEndNode*) BaseCountedLoopNode::loopexit(); }
  int stride_con() const;

  // Match increment with optional truncation
  static Node*
  match_incr_with_optional_truncation(Node* expr, Node** trunc1, Node** trunc2, const TypeInteger** trunc_type,
                                      BasicType bt);

  // A 'main' loop has a pre-loop and a post-loop. The 'main' loop
  // can run short a few iterations and may start a few iterations in.
  // It will be RCE'd and unrolled and aligned.

  // A following 'post' loop will run any remaining iterations. Used
  // during Range Check Elimination, the 'post' loop will do any final
  // iterations with full checks. Also used by Loop Unrolling, where
  // the 'post' loop will do any epilog iterations needed. Basically,
  // a 'post' loop can not profitably be further unrolled or RCE'd.

  // A preceding 'pre' loop will run at least 1 iteration (to do peeling),
  // it may do under-flow checks for RCE and may do alignment iterations
  // so the following main loop 'knows' that it is striding down cache
  // lines.

  // A 'main' loop that is ONLY unrolled or peeled, never RCE'd or
  // Aligned, may be missing its pre-loop.
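  //
  // Schematically (illustration only), the pre/main/post split of
  //   for (int i = init; i < limit; i += stride) body(i);
  // looks like:
  //
  //   i = init;
  //   while (i < pre_limit)  { body(i); i += stride; }  // 'pre' loop, full checks
  //   while (i < main_limit) { body(i); i += stride; }  // 'main' loop, RCE'd/unrolled
  //   while (i < limit)      { body(i); i += stride; }  // 'post' loop, full checks
  //
  // with pre_limit and main_limit chosen so that no range check can fail
  // inside the main loop (see add_constraint() in PhaseIdealLoop below).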
  bool is_normal_loop   () const { return (_loop_flags&PreMainPostFlagsMask) == Normal; }
  bool is_pre_loop      () const { return (_loop_flags&PreMainPostFlagsMask) == Pre;    }
  bool is_main_loop     () const { return (_loop_flags&PreMainPostFlagsMask) == Main;   }
  bool is_post_loop     () const { return (_loop_flags&PreMainPostFlagsMask) == Post;   }
  bool was_slp_analyzed () const { return (_loop_flags&WasSlpAnalyzed) == WasSlpAnalyzed; }
  bool has_passed_slp   () const { return (_loop_flags&PassedSlpAnalysis) == PassedSlpAnalysis; }
  bool is_unroll_only   () const { return (_loop_flags&DoUnrollOnly) == DoUnrollOnly; }
  bool is_main_no_pre_loop() const { return _loop_flags & MainHasNoPreLoop; }
  bool has_atomic_post_loop () const { return (_loop_flags & HasAtomicPostLoop) == HasAtomicPostLoop; }
  void set_main_no_pre_loop() { _loop_flags |= MainHasNoPreLoop; }

  int main_idx() const { return _main_idx; }


  void set_pre_loop  (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Pre ; _main_idx = main->_idx; }
  void set_main_loop (                     ) { assert(is_normal_loop(),""); _loop_flags |= Main;                         }
  void set_post_loop (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Post; _main_idx = main->_idx; }
  void set_normal_loop(                    ) { _loop_flags &= ~PreMainPostFlagsMask; }

  void set_trip_count(uint tc) { _trip_count = tc; }
  uint trip_count()            { return _trip_count; }

  bool has_exact_trip_count() const { return (_loop_flags & HasExactTripCount) != 0; }
  void set_exact_trip_count(uint tc) {
    _trip_count = tc;
    _loop_flags |= HasExactTripCount;
  }
  void set_nonexact_trip_count() {
    _loop_flags &= ~HasExactTripCount;
  }
  void set_notpassed_slp() {
    _loop_flags &= ~PassedSlpAnalysis;
  }

  void double_unrolled_count() { _unrolled_count_log2++; }
  int  unrolled_count()        { return 1 << MIN2(_unrolled_count_log2, BitsPerInt-3); }

  void set_node_count_before_unroll(int ct)  { _node_count_before_unroll = ct; }
  int  node_count_before_unroll()            { return _node_count_before_unroll; }
  void set_slp_max_unroll(int unroll_factor) { _slp_maximum_unroll_factor = unroll_factor; }
  int  slp_max_unroll() const                { return _slp_maximum_unroll_factor; }

  virtual LoopNode* skip_strip_mined(int expect_skeleton = 1);
  OuterStripMinedLoopNode* outer_loop() const;
  virtual IfTrueNode* outer_loop_tail() const;
  virtual OuterStripMinedLoopEndNode* outer_loop_end() const;
  virtual IfFalseNode* outer_loop_exit() const;
  virtual SafePointNode* outer_safepoint() const;

  // If this is a main loop in a pre/main/post loop nest, walk over
  // the predicates that were inserted by
  // duplicate_predicates()/add_range_check_predicate()
  static Node* skip_predicates_from_entry(Node* ctrl);
  Node* skip_predicates();

  virtual BasicType bt() const {
    return T_INT;
  }

  Node* is_canonical_loop_entry();

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif

  static bool is_zero_trip_guard_if(const IfNode* iff);
};

class LongCountedLoopNode : public BaseCountedLoopNode {
public:
  LongCountedLoopNode(Node *entry, Node *backedge)
    : BaseCountedLoopNode(entry, backedge) {
    init_class_id(Class_LongCountedLoop);
  }

  virtual int Opcode() const;

  virtual BasicType bt() const {
    return T_LONG;
  }

  LongCountedLoopEndNode* loopexit_or_null() const { return (LongCountedLoopEndNode*) BaseCountedLoopNode::loopexit_or_null(); }
  LongCountedLoopEndNode* loopexit() const { return (LongCountedLoopEndNode*) BaseCountedLoopNode::loopexit(); }
};


//------------------------------CountedLoopEndNode-----------------------------
// CountedLoopEndNodes end simple trip counted loops. They act much like
// IfNodes.

class BaseCountedLoopEndNode : public IfNode {
public:
  enum { TestControl, TestValue };
  BaseCountedLoopEndNode(Node *control, Node *test, float prob, float cnt)
    : IfNode(control, test, prob, cnt) {
    init_class_id(Class_BaseCountedLoopEnd);
  }

  Node *cmp_node() const     { return (in(TestValue)->req() >=2) ? in(TestValue)->in(1) : nullptr; }
  Node* incr() const         { Node* tmp = cmp_node(); return (tmp && tmp->req() == 3) ? tmp->in(1) : nullptr; }
  Node* limit() const        { Node* tmp = cmp_node(); return (tmp && tmp->req() == 3) ? tmp->in(2) : nullptr; }
  Node* stride() const       { Node* tmp = incr();     return (tmp && tmp->req() == 3) ? tmp->in(2) : nullptr; }
  Node* init_trip() const    { Node* tmp = phi();      return (tmp && tmp->req() == 3) ? tmp->in(1) : nullptr; }
  bool stride_is_con() const { Node *tmp = stride();   return (tmp != nullptr && tmp->is_Con()); }

  PhiNode* phi() const {
    Node* tmp = incr();
    if (tmp && tmp->req() == 3) {
      Node* phi = tmp->in(1);
      if (phi->is_Phi()) {
        return phi->as_Phi();
      }
    }
    return nullptr;
  }

  BaseCountedLoopNode* loopnode() const {
    // The CountedLoopNode that goes with this CountedLoopEndNode may
    // have been optimized out by the IGVN so be cautious with the
    // pattern matching on the graph
    PhiNode* iv_phi = phi();
    if (iv_phi == nullptr) {
      return nullptr;
    }
    Node* ln = iv_phi->in(0);
    if (!ln->is_BaseCountedLoop() || ln->as_BaseCountedLoop()->loopexit_or_null() != this) {
      return nullptr;
    }
    if (ln->as_BaseCountedLoop()->bt() != bt()) {
      return nullptr;
    }
    return ln->as_BaseCountedLoop();
  }

  BoolTest::mask test_trip() const { return in(TestValue)->as_Bool()->_test._test; }

  jlong stride_con() const;
  virtual BasicType bt() const = 0;

  static BaseCountedLoopEndNode* make(Node* control, Node* test, float prob, float cnt, BasicType bt);
};

class CountedLoopEndNode : public BaseCountedLoopEndNode {
public:

  CountedLoopEndNode(Node *control, Node *test, float prob, float cnt)
    : BaseCountedLoopEndNode(control, test, prob, cnt) {
    init_class_id(Class_CountedLoopEnd);
  }
  virtual int Opcode() const;

  CountedLoopNode* loopnode() const {
    return (CountedLoopNode*) BaseCountedLoopEndNode::loopnode();
  }

  virtual BasicType bt() const {
    return T_INT;
  }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

class LongCountedLoopEndNode : public BaseCountedLoopEndNode {
public:
  LongCountedLoopEndNode(Node *control, Node *test, float prob, float cnt)
    : BaseCountedLoopEndNode(control, test, prob, cnt) {
    init_class_id(Class_LongCountedLoopEnd);
  }

  LongCountedLoopNode* loopnode() const {
    return (LongCountedLoopNode*) BaseCountedLoopEndNode::loopnode();
  }

  virtual int Opcode() const;

  virtual BasicType bt() const {
    return T_LONG;
  }
};


inline BaseCountedLoopEndNode* BaseCountedLoopNode::loopexit_or_null() const {
  Node* bctrl = back_control();
  if (bctrl == nullptr) return nullptr;

  Node* lexit = bctrl->in(0);
  if (!lexit->is_BaseCountedLoopEnd()) {
    return nullptr;
  }
  BaseCountedLoopEndNode* result = lexit->as_BaseCountedLoopEnd();
  if (result->bt() != bt()) {
    return nullptr;
  }
  return result;
}

inline BaseCountedLoopEndNode* BaseCountedLoopNode::loopexit() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  assert(cle != nullptr, "loopexit is null");
  return cle;
}

inline Node* BaseCountedLoopNode::init_trip() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != nullptr ? cle->init_trip() : nullptr;
}
inline Node* BaseCountedLoopNode::stride() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != nullptr ? cle->stride() : nullptr;
}

inline bool BaseCountedLoopNode::stride_is_con() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != nullptr && cle->stride_is_con();
}
inline Node* BaseCountedLoopNode::limit() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != nullptr ? cle->limit() : nullptr;
}
inline Node* BaseCountedLoopNode::incr() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != nullptr ? cle->incr() : nullptr;
}
inline Node* BaseCountedLoopNode::phi() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != nullptr ? cle->phi() : nullptr;
}

inline jlong BaseCountedLoopNode::stride_con() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != nullptr ? cle->stride_con() : 0;
}


//------------------------------LoopLimitNode-----------------------------
// Counted Loop limit node which represents exact final iterator value:
//   trip_count  = (limit - init_trip + stride - 1)/stride
//   final_value = trip_count * stride + init_trip
// Use HW instructions to calculate it when it can overflow in integer.
// Note, final_value should fit into integer since counted loop has
// limit check: limit <= max_int-stride.
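// For example, with init_trip = 0, limit = 10 and stride = 3:
//   trip_count  = (10 - 0 + 3 - 1)/3 = 4   (iterations i = 0, 3, 6, 9)
//   final_value = 4 * 3 + 0 = 12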
class LoopLimitNode : public Node {
  enum { Init=1, Limit=2, Stride=3 };
public:
  LoopLimitNode( Compile* C, Node *init, Node *limit, Node *stride ) : Node(0,init,limit,stride) {
    // Put it on the Macro nodes list to optimize during macro nodes expansion.
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeInt::INT; }
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase);
};

// Support for strip mining
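//
// Schematically (illustration only), a strip mined loop nest runs the
// inner CountedLoop for a bounded strip of iterations with no safepoint,
// while the outer loop keeps a SafePoint so the strips stay interruptible:
//
//   OuterStripMinedLoop {
//     CountedLoop { body; }   // runs a bounded strip of iterations
//     SafePoint;              // outer_safepoint()
//   } OuterStripMinedLoopEnd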
class OuterStripMinedLoopNode : public LoopNode {
private:
  static void fix_sunk_stores(CountedLoopEndNode* inner_cle, LoopNode* inner_cl, PhaseIterGVN* igvn, PhaseIdealLoop* iloop);

public:
  OuterStripMinedLoopNode(Compile* C, Node *entry, Node *backedge)
    : LoopNode(entry, backedge) {
    init_class_id(Class_OuterStripMinedLoop);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }

  virtual int Opcode() const;

  virtual IfTrueNode* outer_loop_tail() const;
  virtual OuterStripMinedLoopEndNode* outer_loop_end() const;
  virtual IfFalseNode* outer_loop_exit() const;
  virtual SafePointNode* outer_safepoint() const;
  void adjust_strip_mined_loop(PhaseIterGVN* igvn);

  void remove_outer_loop_and_safepoint(PhaseIterGVN* igvn) const;

  void transform_to_counted_loop(PhaseIterGVN* igvn, PhaseIdealLoop* iloop);

  static Node* register_new_node(Node* node, LoopNode* ctrl, PhaseIterGVN* igvn, PhaseIdealLoop* iloop);

  Node* register_control(Node* node, Node* loop, Node* idom, PhaseIterGVN* igvn,
                         PhaseIdealLoop* iloop);
};

class OuterStripMinedLoopEndNode : public IfNode {
public:
  OuterStripMinedLoopEndNode(Node *control, Node *test, float prob, float cnt)
    : IfNode(control, test, prob, cnt) {
    init_class_id(Class_OuterStripMinedLoopEnd);
  }

  virtual int Opcode() const;

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  bool is_expanded(PhaseGVN *phase) const;
};

// -----------------------------IdealLoopTree----------------------------------
class IdealLoopTree : public ResourceObj {
public:
  IdealLoopTree *_parent;  // Parent in loop tree
  IdealLoopTree *_next;    // Next sibling in loop tree
  IdealLoopTree *_child;   // First child in loop tree

  // The head-tail backedge defines the loop.
  // If a loop has multiple backedges, this is addressed during cleanup where
  // we peel off the multiple backedges, merging all edges at the bottom and
  // ensuring that one proper backedge flows into the loop.
  Node *_head;             // Head of loop
  Node *_tail;             // Tail of loop
  inline Node *tail();     // Handle lazy update of _tail field
  inline Node *head();     // Handle lazy update of _head field
  PhaseIdealLoop* _phase;
  int _local_loop_unroll_limit;
  int _local_loop_unroll_factor;

  Node_List _body;         // Loop body for inner loops

  uint16_t _nest;          // Nesting depth
  uint8_t _irreducible:1,  // True if irreducible
          _has_call:1,     // True if has call safepoint
          _has_sfpt:1,     // True if has non-call safepoint
          _rce_candidate:1, // True if candidate for range check elimination
          _has_range_checks:1,
          _has_range_checks_computed:1;

  Node_List* _safepts;     // List of safepoints in this loop
  Node_List* _required_safept; // An inner loop cannot delete these safepts;
  bool _allow_optimizations; // Allow loop optimizations

  IdealLoopTree( PhaseIdealLoop* phase, Node *head, Node *tail )
    : _parent(0), _next(0), _child(0),
      _head(head), _tail(tail),
      _phase(phase),
      _local_loop_unroll_limit(0), _local_loop_unroll_factor(0),
      _nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0),
      _has_range_checks(0), _has_range_checks_computed(0),
      _safepts(nullptr),
      _required_safept(nullptr),
      _allow_optimizations(true)
  {
    precond(_head != nullptr);
    precond(_tail != nullptr);
  }

  // Is 'l' a member of 'this'?
  bool is_member(const IdealLoopTree *l) const; // Test for nested membership

  // Set loop nesting depth. Accumulate has_call bits.
  int set_nest( uint depth );

  // Split out multiple fall-in edges from the loop header. Move them to a
  // private RegionNode before the loop. This becomes the loop landing pad.
  void split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt );

  // Split out the outermost loop from this shared header.
  void split_outer_loop( PhaseIdealLoop *phase );

  // Merge all the backedges from the shared header into a private Region.
  // Feed that region as the one backedge to this loop.
  void merge_many_backedges( PhaseIdealLoop *phase );

  // Split shared headers and insert loop landing pads.
  // Insert a LoopNode to replace the RegionNode.
  // Returns TRUE if loop tree is structurally changed.
  bool beautify_loops( PhaseIdealLoop *phase );

  // Perform optimization to use the loop predicates for null checks and range checks.
  // Applies to any loop level (not just the innermost one)
  bool loop_predication( PhaseIdealLoop *phase);

  // Perform iteration-splitting on inner loops. Split iterations to
  // avoid range checks or one-shot null checks. Returns false if the
  // current round of loop opts should stop.
  bool iteration_split( PhaseIdealLoop *phase, Node_List &old_new );

  // Driver for various flavors of iteration splitting. Returns false
  // if the current round of loop opts should stop.
  bool iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new );

  // Given dominators, try to find loops with calls that must always be
  // executed (call dominates loop tail). These loops do not need non-call
  // safepoints (ncsfpt).
  void check_safepts(VectorSet &visited, Node_List &stack);

  // Allpaths backwards scan from loop tail, terminating each path at first safepoint
  // encountered.
  void allpaths_check_safepts(VectorSet &visited, Node_List &stack);

  // Remove safepoints from loop. Optionally keeping one.
  void remove_safepoints(PhaseIdealLoop* phase, bool keep_one);

  // Convert to counted loops where possible
  void counted_loop( PhaseIdealLoop *phase );

  // Check for Node being a loop-breaking test
  Node *is_loop_exit(Node *iff) const;

  // Remove simplistic dead code from loop body
  void DCE_loop_body();

  // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
  // Replace with a 1-in-10 exit guess.
  void adjust_loop_exit_prob( PhaseIdealLoop *phase );

  // Return TRUE or FALSE if the loop should never be RCE'd or aligned.
  // Useful for unrolling loops with NO array accesses.
  bool policy_peel_only( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be unswitched -- clone
  // loop with an invariant test
  bool policy_unswitching( PhaseIdealLoop *phase ) const;

  // Micro-benchmark spamming. Remove empty loops.
  bool do_remove_empty_loop( PhaseIdealLoop *phase );

  // Convert one iteration loop into normal code.
  bool do_one_iteration_loop( PhaseIdealLoop *phase );

  // Return TRUE or FALSE if the loop should be peeled or not. Peel if we can
  // move some loop-invariant test (usually a null-check) before the loop.
  bool policy_peeling(PhaseIdealLoop *phase);

  uint estimate_peeling(PhaseIdealLoop *phase);

  // Return TRUE or FALSE if the loop should be maximally unrolled. Stash any
  // known trip count in the counted loop node.
  bool policy_maximally_unroll(PhaseIdealLoop *phase) const;

  // Return TRUE or FALSE if the loop should be unrolled or not. Apply unroll
  // if the loop is a counted loop and the loop body is small enough.
  bool policy_unroll(PhaseIdealLoop *phase);

  // Loop analyses to map to a maximal superword unrolling for vectorization.
  void policy_unroll_slp_analysis(CountedLoopNode *cl, PhaseIdealLoop *phase, int future_unroll_ct);

  // Return TRUE or FALSE if the loop should be range-check-eliminated.
  // Gather a list of IF tests that are dominated by iteration splitting;
  // also gather the end of the first split and the start of the 2nd split.
  bool policy_range_check(PhaseIdealLoop* phase, bool provisional, BasicType bt) const;

  // Return TRUE if "iff" is a range check.
  bool is_range_check_if(IfProjNode* if_success_proj, PhaseIdealLoop* phase, Invariance& invar DEBUG_ONLY(COMMA ProjNode* predicate_proj)) const;
  bool is_range_check_if(IfProjNode* if_success_proj, PhaseIdealLoop* phase, BasicType bt, Node* iv, Node*& range, Node*& offset,
                         jlong& scale) const;

  // Estimate the number of nodes required when cloning a loop (body).
  uint est_loop_clone_sz(uint factor) const;
  // Estimate the number of nodes required when unrolling a loop (body).
  uint est_loop_unroll_sz(uint factor) const;

  // Compute loop trip count if possible
  void compute_trip_count(PhaseIdealLoop* phase);

  // Compute loop trip count from profile data
  float compute_profile_trip_cnt_helper(Node* n);
  void compute_profile_trip_cnt( PhaseIdealLoop *phase );

  // Reassociate invariant expressions.
  void reassociate_invariants(PhaseIdealLoop *phase);
  // Reassociate invariant binary expressions.
  Node* reassociate(Node* n1, PhaseIdealLoop *phase);
  // Reassociate invariant add and subtract expressions.
  Node* reassociate_add_sub(Node* n1, int inv1_idx, int inv2_idx, PhaseIdealLoop *phase);
  // Return nonzero index of invariant operand if invariant and variant
  // are combined with an associative binary. Helper for reassociate_invariants.
  int find_invariant(Node* n, PhaseIdealLoop *phase);
  // Return TRUE if "n" is associative.
  bool is_associative(Node* n, Node* base=nullptr);
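  //
  // For example (illustration only): with x loop-variant and inv1, inv2
  // loop-invariant, reassociation rewrites
  //
  //   inv1 + (x + inv2)  =>  (inv1 + inv2) + x
  //
  // so that the invariant subexpression (inv1 + inv2) can be hoisted out
  // of the loop.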

  // Return true if n is invariant
  bool is_invariant(Node* n) const;

  // Put loop body on igvn work list
  void record_for_igvn();

  bool is_root() { return _parent == nullptr; }
  // A proper/reducible loop w/o any (occasional) dead back-edge.
  bool is_loop() { return !_irreducible && !tail()->is_top(); }
  bool is_counted()   { return is_loop() && _head->is_CountedLoop(); }
  bool is_innermost() { return is_loop() && _child == nullptr; }

  void remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase);

  bool compute_has_range_checks() const;
  bool range_checks_present() {
    if (!_has_range_checks_computed) {
      if (compute_has_range_checks()) {
        _has_range_checks = 1;
      }
      _has_range_checks_computed = 1;
    }
    return _has_range_checks;
  }

#ifndef PRODUCT
  void dump_head();       // Dump loop head only
  void dump();            // Dump this loop recursively
#endif

#ifdef ASSERT
  GrowableArray<IdealLoopTree*> collect_sorted_children() const;
  bool verify_tree(IdealLoopTree* loop_verify) const;
#endif

private:
  enum { EMPTY_LOOP_SIZE = 7 }; // Number of nodes in an empty loop.

  // Estimate the number of nodes resulting from control and data flow merge.
  uint est_loop_flow_merge_sz() const;

  // Check if the number of residual iterations is large with unroll_cnt.
  // Return true if the residual iterations are more than 10% of the trip count.
  bool is_residual_iters_large(int unroll_cnt, CountedLoopNode *cl) const {
    return (unroll_cnt - 1) * (100.0 / LoopPercentProfileLimit) > cl->profile_trip_cnt();
  }

  void collect_loop_core_nodes(PhaseIdealLoop* phase, Unique_Node_List& wq) const;

  bool empty_loop_with_data_nodes(PhaseIdealLoop* phase) const;

  void enqueue_data_nodes(PhaseIdealLoop* phase, Unique_Node_List& empty_loop_nodes, Unique_Node_List& wq) const;

  bool process_safepoint(PhaseIdealLoop* phase, Unique_Node_List& empty_loop_nodes, Unique_Node_List& wq,
                         Node* sfpt) const;

  bool empty_loop_candidate(PhaseIdealLoop* phase) const;

  bool empty_loop_with_extra_nodes_candidate(PhaseIdealLoop* phase) const;
};

// -----------------------------PhaseIdealLoop---------------------------------
// Computes the mapping from Nodes to IdealLoopTrees. Organizes IdealLoopTrees
// into a loop tree. Drives the loop-based transformations on the ideal graph.
class PhaseIdealLoop : public PhaseTransform {
  friend class IdealLoopTree;
  friend class SuperWord;
  friend class CountedLoopReserveKit;
  friend class ShenandoahBarrierC2Support;
  friend class AutoNodeBudget;

  // Map loop membership for CFG nodes, and ctrl for non-CFG nodes.
  Node_List _loop_or_ctrl;

  // Pre-computed def-use info
  PhaseIterGVN &_igvn;

  // Head of loop tree
  IdealLoopTree* _ltree_root;

  // Array of pre-order numbers, plus post-visited bit.
  // ZERO for not pre-visited. EVEN for pre-visited but not post-visited.
  // ODD for post-visited. Other bits are the pre-order number.
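  // For example, a node with pre-order number 5 is stored as (5 << 1) == 10
  // while its subtree is still being visited, and as 10 | 1 == 11 once it
  // has been post-visited.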
  uint *_preorders;
  uint _max_preorder;

  const PhaseIdealLoop* _verify_me;
  bool _verify_only;

  // Allocate _preorders[] array
  void allocate_preorders() {
    _max_preorder = C->unique()+8;
    _preorders = NEW_RESOURCE_ARRAY(uint, _max_preorder);
    memset(_preorders, 0, sizeof(uint) * _max_preorder);
  }

  // Reallocate the _preorders[] array and zero it
  void reallocate_preorders() {
    if ( _max_preorder < C->unique() ) {
      _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, C->unique());
      _max_preorder = C->unique();
    }
    memset(_preorders, 0, sizeof(uint) * _max_preorder);
  }

  // Check to grow _preorders[] array for the case when build_loop_tree_impl()
  // adds new nodes.
  void check_grow_preorders( ) {
    if ( _max_preorder < C->unique() ) {
      uint newsize = _max_preorder<<1;  // double size of array
      _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, newsize);
      memset(&_preorders[_max_preorder],0,sizeof(uint)*(newsize-_max_preorder));
      _max_preorder = newsize;
    }
  }
  // Check for pre-visited. Zero for NOT visited; non-zero for visited.
  int is_visited( Node *n ) const { return _preorders[n->_idx]; }
  // Pre-order numbers are written to the Nodes array as low-bit-set values.
  void set_preorder_visited( Node *n, int pre_order ) {
    assert( !is_visited( n ), "already set" );
    _preorders[n->_idx] = (pre_order<<1);
  };
  // Return pre-order number.
  int get_preorder( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]>>1; }

  // Check for being post-visited.
  // Should be previsited already (checked with assert(is_visited(n))).
  int is_postvisited( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]&1; }

  // Mark as post visited
  void set_postvisited( Node *n ) { assert( !is_postvisited( n ), "" ); _preorders[n->_idx] |= 1; }

public:
  // Set/get control node out. Set lower bit to distinguish from IdealLoopTree
  // Returns true if "n" is a data node, false if it's a control node.
  bool has_ctrl(const Node* n) const { return ((intptr_t)_loop_or_ctrl[n->_idx]) & 1; }

private:
  // clear out dead code after build_loop_late
  Node_List _deadlist;
  Node_List _zero_trip_guard_opaque_nodes;

  // Support for faster execution of get_late_ctrl()/dom_lca()
  // when a node has many uses and dominator depth is deep.
  GrowableArray<jlong> _dom_lca_tags;
  uint _dom_lca_tags_round;
  void init_dom_lca_tags();

  // Helper for debugging bad dominance relationships
  bool verify_dominance(Node* n, Node* use, Node* LCA, Node* early);

  Node* compute_lca_of_uses(Node* n, Node* early, bool verify = false);

  // Inline wrapper for frequent cases:
  // 1) only one use
  // 2) a use is the same as the current LCA passed as 'n1'
  Node *dom_lca_for_get_late_ctrl( Node *lca, Node *n, Node *tag ) {
    assert( n->is_CFG(), "" );
    // Fast-path null lca
    if( lca != nullptr && lca != n ) {
      assert( lca->is_CFG(), "" );
      // find LCA of all uses
      n = dom_lca_for_get_late_ctrl_internal( lca, n, tag );
    }
    return find_non_split_ctrl(n);
  }
  Node *dom_lca_for_get_late_ctrl_internal( Node *lca, Node *n, Node *tag );

  // Helper function for directing control inputs away from CFG split points.
  Node *find_non_split_ctrl( Node *ctrl ) const {
    if (ctrl != nullptr) {
      if (ctrl->is_MultiBranch()) {
        ctrl = ctrl->in(0);
      }
      assert(ctrl->is_CFG(), "CFG");
    }
    return ctrl;
  }

  Node* cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop);

#ifdef ASSERT
  void ensure_zero_trip_guard_proj(Node* node, bool is_main_loop);
#endif
  void copy_assertion_predicates_to_main_loop_helper(Node* predicate, Node* init, Node* stride, IdealLoopTree* outer_loop,
                                                     LoopNode* outer_main_head, uint dd_main_head,
                                                     uint idx_before_pre_post, uint idx_after_post_before_pre,
                                                     Node* zero_trip_guard_proj_main, Node* zero_trip_guard_proj_post,
                                                     const Node_List &old_new);
  void copy_assertion_predicates_to_main_loop(CountedLoopNode* pre_head, Node* init, Node* stride, IdealLoopTree* outer_loop,
                                              LoopNode* outer_main_head, uint dd_main_head, uint idx_before_pre_post,
                                              uint idx_after_post_before_pre, Node* zero_trip_guard_proj_main,
                                              Node* zero_trip_guard_proj_post, const Node_List& old_new);
  Node* clone_assertion_predicate_and_initialize(Node* iff, Node* new_init, Node* new_stride, Node* predicate,
                                                 Node* uncommon_proj, Node* control, IdealLoopTree* outer_loop,
                                                 Node* input_proj);
  static void count_opaque_loop_nodes(Node* n, uint& init, uint& stride);
  static bool subgraph_has_opaque(Node* n);
  Node* create_bool_from_template_assertion_predicate(Node* template_assertion_predicate, Node* new_init, Node* new_stride,
                                                      Node* control);
  static bool assertion_predicate_has_loop_opaque_node(IfNode* iff);
  static void get_assertion_predicates(Node* predicate, Unique_Node_List& list, bool get_opaque = false);
  void update_main_loop_assertion_predicates(Node* ctrl, CountedLoopNode* loop_head, Node* init, int stride_con);
  void copy_assertion_predicates_to_post_loop(LoopNode* main_loop_head, CountedLoopNode* post_loop_head, Node* init,
                                              Node* stride);
  void initialize_assertion_predicates_for_peeled_loop(IfProjNode* predicate_proj, LoopNode* outer_loop_head,
                                                       const int dd_outer_loop_head, Node* init, Node* stride,
                                                       IdealLoopTree* outer_loop, const uint idx_before_clone,
                                                       const Node_List& old_new);
  void insert_loop_limit_check_predicate(ParsePredicateSuccessProj* loop_limit_check_parse_proj, Node* cmp_limit,
                                         Node* bol);
#ifdef ASSERT
  bool only_has_infinite_loops();
#endif

  void log_loop_tree();

public:

  PhaseIterGVN &igvn() const { return _igvn; }

  bool has_node(const Node* n) const {
    guarantee(n != nullptr, "No Node.");
    return _loop_or_ctrl[n->_idx] != nullptr;
  }
  // check if transform created new nodes that need _ctrl recorded
  Node *get_late_ctrl( Node *n, Node *early );
  Node *get_early_ctrl( Node *n );
  Node *get_early_ctrl_for_expensive(Node *n, Node* earliest);
  void set_early_ctrl(Node* n, bool update_body);
  void set_subtree_ctrl(Node* n, bool update_body);
  void set_ctrl( Node *n, Node *ctrl ) {
    assert( !has_node(n) || has_ctrl(n), "" );
    assert( ctrl->in(0), "cannot set dead control node" );
    assert( ctrl == find_non_split_ctrl(ctrl), "must set legal ctrl" );
    _loop_or_ctrl.map(n->_idx, (Node*)((intptr_t)ctrl + 1));
  }
  // Set control and update loop membership
  void set_ctrl_and_loop(Node* n, Node* ctrl) {
    IdealLoopTree* old_loop = get_loop(get_ctrl(n));
    IdealLoopTree* new_loop = get_loop(ctrl);
    if (old_loop != new_loop) {
      if (old_loop->_child == nullptr) old_loop->_body.yank(n);
      if (new_loop->_child == nullptr) new_loop->_body.push(n);
    }
    set_ctrl(n, ctrl);
  }
  // Control nodes can be replaced or subsumed. During this pass they
  // get their replacement Node in slot 1. Instead of updating the block
  // location of all Nodes in the subsumed block, we lazily do it. As we
  // pull such a subsumed block out of the array, we write back the final
  // correct block.
  Node* get_ctrl(const Node* i) {
    assert(has_node(i), "");
    Node *n = get_ctrl_no_update(i);
    _loop_or_ctrl.map(i->_idx, (Node*)((intptr_t)n + 1));
    assert(has_node(i) && has_ctrl(i), "");
    assert(n == find_non_split_ctrl(n), "must return legal ctrl");
    return n;
  }
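  // To summarize the encoding: for a node with index idx, _loop_or_ctrl[idx]
  // holds
  //   - (Node*)((intptr_t)ctrl + 1)      a tagged ctrl, if idx is a data node,
  //   - (IdealLoopTree*)loop             untagged,      if idx is a CFG node,
  //   - (Node*)((intptr_t)new_node + 1)  a forwarding pointer, if the node
  //                                      was subsumed (see lazy_update()).
  // has_ctrl() tests the low bit to tell the cases apart.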
  // true if CFG node d dominates CFG node n
  bool is_dominator(Node *d, Node *n);
  // return get_ctrl for a data node and self(n) for a CFG node
  Node* ctrl_or_self(Node* n) {
    if (has_ctrl(n))
      return get_ctrl(n);
    else {
      assert (n->is_CFG(), "must be a CFG node");
      return n;
    }
  }

  Node* get_ctrl_no_update_helper(const Node* i) const {
    assert(has_ctrl(i), "should be control, not loop");
    return (Node*)(((intptr_t)_loop_or_ctrl[i->_idx]) & ~1);
  }

  Node* get_ctrl_no_update(const Node* i) const {
    assert( has_ctrl(i), "" );
    Node *n = get_ctrl_no_update_helper(i);
    if (!n->in(0)) {
      // Skip dead CFG nodes
      do {
        n = get_ctrl_no_update_helper(n);
      } while (!n->in(0));
      n = find_non_split_ctrl(n);
    }
    return n;
  }

  // Check for loop being set
  // "n" must be a control node. Returns true if "n" is known to be in a loop.
  bool has_loop( Node *n ) const {
    assert(!has_node(n) || !has_ctrl(n), "");
    return has_node(n);
  }
  // Set loop
  void set_loop( Node *n, IdealLoopTree *loop ) {
    _loop_or_ctrl.map(n->_idx, (Node*)loop);
  }
  // Lazy-dazy update of 'get_ctrl' and 'idom_at' mechanisms. Replace
  // the 'old_node' with 'new_node'. Kill old-node. Add a reference
  // from old_node to new_node to support the lazy update. Reference
  // replaces loop reference, since that is not needed for dead node.
  void lazy_update(Node *old_node, Node *new_node) {
    assert(old_node != new_node, "no cycles please");
    // Re-use the side array slot for this node to provide the
    // forwarding pointer.
    _loop_or_ctrl.map(old_node->_idx, (Node*)((intptr_t)new_node + 1));
  }
  void lazy_replace(Node *old_node, Node *new_node) {
    _igvn.replace_node(old_node, new_node);
    lazy_update(old_node, new_node);
  }

private:

  // Place 'n' in some loop nest, where 'n' is a CFG node
  void build_loop_tree();
  int build_loop_tree_impl( Node *n, int pre_order );
  // Insert loop into the existing loop tree. 'innermost' is a leaf of the
  // loop tree, not the root.
  IdealLoopTree *sort( IdealLoopTree *loop, IdealLoopTree *innermost );

#ifdef ASSERT
  // verify that regions in irreducible loops are marked is_in_irreducible_loop
  void verify_regions_in_irreducible_loops();
  bool is_in_irreducible_loop(RegionNode* region);
#endif

  // Place Data nodes in some loop nest
  void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
  void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
  void build_loop_late_post_work(Node* n, bool pinned);
  void build_loop_late_post(Node* n);
  void verify_strip_mined_scheduling(Node *n, Node* least);

  // Array of immediate dominance info for each CFG node indexed by node idx
private:
  uint _idom_size;
  Node **_idom;                  // Array of immediate dominators
  uint *_dom_depth;              // Used for fast LCA test
  GrowableArray<uint>* _dom_stk; // For recomputation of dom depth
  LoopOptsMode _mode;

  // build the loop tree and perform any requested optimizations
  void build_and_optimize();

  // Dominators for the sea of nodes
  void Dominators();

  // Compute the Ideal Node to Loop mapping
  PhaseIdealLoop(PhaseIterGVN& igvn, LoopOptsMode mode) :
    PhaseTransform(Ideal_Loop),
    _igvn(igvn),
    _verify_me(nullptr),
    _verify_only(false),
    _mode(mode),
    _nodes_required(UINT_MAX) {
    assert(mode != LoopOptsVerify, "wrong constructor to verify IdealLoop");
    build_and_optimize();
  }

#ifndef PRODUCT
  // Verify that verify_me made the same decisions as a fresh run
  // or only verify that the graph is valid if verify_me is null.
  PhaseIdealLoop(PhaseIterGVN& igvn, const PhaseIdealLoop* verify_me = nullptr) :
    PhaseTransform(Ideal_Loop),
    _igvn(igvn),
    _verify_me(verify_me),
    _verify_only(verify_me == nullptr),
    _mode(LoopOptsVerify),
    _nodes_required(UINT_MAX) {
    build_and_optimize();
  }
#endif

public:
  Node* idom_no_update(Node* d) const {
    return idom_no_update(d->_idx);
  }

  Node* idom_no_update(uint didx) const {
    assert(didx < _idom_size, "oob");
    Node* n = _idom[didx];
    assert(n != nullptr,"Bad immediate dominator info.");
    while (n->in(0) == nullptr) { // Skip dead CFG nodes
      n = (Node*)(((intptr_t)_loop_or_ctrl[n->_idx]) & ~1);
      assert(n != nullptr,"Bad immediate dominator info.");
    }
    return n;
  }

  Node *idom(Node* d) const {
    return idom(d->_idx);
  }

  Node *idom(uint didx) const {
    Node *n = idom_no_update(didx);
    _idom[didx] = n; // Lazily remove dead CFG nodes from table.
    return n;
  }

  uint dom_depth(Node* d) const {
    guarantee(d != nullptr, "Null dominator info.");
    guarantee(d->_idx < _idom_size, "");
    return _dom_depth[d->_idx];
  }
  void set_idom(Node* d, Node* n, uint dom_depth);
  // Locally compute IDOM using dom_lca call
  Node *compute_idom( Node *region ) const;
  // Recompute dom_depth
  void recompute_dom_depth();

  // Is safept not required by an outer loop?
  bool is_deleteable_safept(Node* sfpt);

  // Replace parallel induction variable (parallel to trip counter)
  void replace_parallel_iv(IdealLoopTree *loop);

  Node *dom_lca( Node *n1, Node *n2 ) const {
    return find_non_split_ctrl(dom_lca_internal(n1, n2));
  }
  Node *dom_lca_internal( Node *n1, Node *n2 ) const;

  // Build and verify the loop tree without modifying the graph. This
  // is useful to verify that all inputs properly dominate their uses.
  static void verify(PhaseIterGVN& igvn) {
#ifdef ASSERT
    ResourceMark rm;
    Compile::TracePhase tp("idealLoopVerify", &timers[_t_idealLoopVerify]);
    PhaseIdealLoop v(igvn);
#endif
  }

  // Recommended way to use PhaseIdealLoop.
  // Runs PhaseIdealLoop in some mode and allocates a local scope for memory allocations.
  static void optimize(PhaseIterGVN &igvn, LoopOptsMode mode) {
    ResourceMark rm;
    PhaseIdealLoop v(igvn, mode);

    Compile* C = Compile::current();
    if (!C->failing()) {
      // Cleanup any modified bits
      igvn.optimize();

      v.log_loop_tree();
    }
  }

  // True if the method has at least 1 irreducible loop
  bool _has_irreducible_loops;

  // Per-Node transform
  virtual Node* transform(Node* n) { return nullptr; }

  Node* loop_exit_control(Node* x, IdealLoopTree* loop);
  Node* loop_exit_test(Node* back_control, IdealLoopTree* loop, Node*& incr, Node*& limit, BoolTest::mask& bt, float& cl_prob);
  Node* loop_iv_incr(Node* incr, Node* x, IdealLoopTree* loop, Node*& phi_incr);
  Node* loop_iv_stride(Node* incr, IdealLoopTree* loop, Node*& xphi);
  PhiNode* loop_iv_phi(Node* xphi, Node* phi_incr, Node* x, IdealLoopTree* loop);

  bool is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_bt);

  Node* loop_nest_replace_iv(Node* iv_to_replace, Node* inner_iv, Node* outer_phi, Node* inner_head, BasicType bt);
  bool create_loop_nest(IdealLoopTree* loop, Node_List &old_new);
#ifdef ASSERT
  bool convert_to_long_loop(Node* cmp, Node* phi, IdealLoopTree* loop);
#endif
  void add_parse_predicate(Deoptimization::DeoptReason reason, Node* inner_head, IdealLoopTree* loop, SafePointNode* sfpt);
  SafePointNode* find_safepoint(Node* back_control, Node* x, IdealLoopTree* loop);
  IdealLoopTree* insert_outer_loop(IdealLoopTree* loop, LoopNode* outer_l, Node* outer_ift);
  IdealLoopTree* create_outer_strip_mined_loop(BoolNode *test, Node *cmp, Node *init_control,
                                               IdealLoopTree* loop, float cl_prob, float le_fcnt,
                                               Node*& entry_control, Node*& iffalse);

  Node* exact_limit( IdealLoopTree *loop );

  // Return a post-walked LoopNode
  IdealLoopTree *get_loop( Node *n ) const {
    // Dead nodes have no loop, so return the top level loop instead
    if (!has_node(n)) return _ltree_root;
    assert(!has_ctrl(n), "");
    return (IdealLoopTree*)_loop_or_ctrl[n->_idx];
  }

  IdealLoopTree* ltree_root() const { return _ltree_root; }

  // Is 'n' a (nested) member of 'loop'?
  int is_member( const IdealLoopTree *loop, Node *n ) const {
    return loop->is_member(get_loop(n)); }

  // This is the basic building block of the loop optimizations. It clones an
  // entire loop body. It makes an old_new loop body mapping; with this
  // mapping you can find the new-loop equivalent to an old-loop node. All
  // new-loop nodes are exactly equal to their old-loop counterparts, all
  // edges are the same. All exits from the old-loop now have a RegionNode
  // that merges the equivalent new-loop path. This is true even for the
  // normal "loop-exit" condition. All uses of loop-invariant old-loop values
  // now come from (one or more) Phis that merge their new-loop equivalents.
  // Parameter side_by_side_idom:
  //   When side_by_side_idom is null, the dominator tree is constructed for
  //     the clone loop to dominate the original. Used in construction of
  //     pre-main-post loop sequence.
  //   When nonnull, the clone and original are side-by-side, both are
  //     dominated by the passed in side_by_side_idom node. Used in
  //     construction of unswitched loops.
  enum CloneLoopMode {
    IgnoreStripMined = 0,        // Only clone inner strip mined loop
    CloneIncludesStripMined = 1, // clone both inner and outer strip mined loops
    ControlAroundStripMined = 2  // Only clone inner strip mined loop,
                                 // result control flow branches
                                 // either to inner clone or outer
                                 // strip mined loop.
  };
  void clone_loop( IdealLoopTree *loop, Node_List &old_new, int dom_depth,
                   CloneLoopMode mode, Node* side_by_side_idom = nullptr);
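  // For example (illustration only): after
  //   clone_loop(loop, old_new, dom_depth(loop->_head), IgnoreStripMined);
  // the clone of an old-loop node n can be looked up as old_new[n->_idx].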
  void clone_loop_handle_data_uses(Node* old, Node_List &old_new,
                                   IdealLoopTree* loop, IdealLoopTree* companion_loop,
                                   Node_List*& split_if_set, Node_List*& split_bool_set,
                                   Node_List*& split_cex_set, Node_List& worklist,
                                   uint new_counter, CloneLoopMode mode);
  void clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop,
                        IdealLoopTree* outer_loop, int dd, Node_List &old_new,
                        Node_List& extra_data_nodes);

  // If we got the effect of peeling, either by actually peeling or by
  // making a pre-loop which must execute at least once, we can remove
  // all loop-invariant dominated tests in the main body.
  void peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new );

  // Generate code to do a loop peel for the given loop (and body).
  // old_new is a temp array.
  void do_peeling( IdealLoopTree *loop, Node_List &old_new );

  // Add pre and post loops around the given loop. These loops are used
  // during RCE, unrolling and aligning loops.
  void insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only );

  // Add post loop after the given loop.
  Node *insert_post_loop(IdealLoopTree* loop, Node_List& old_new,
                         CountedLoopNode* main_head, CountedLoopEndNode* main_end,
                         Node*& incr, Node* limit, CountedLoopNode*& post_head);

  // Add a vector post loop between a vector main loop and the current post loop
  void insert_vector_post_loop(IdealLoopTree *loop, Node_List &old_new);
  // If Node n lives in the back_ctrl block, we clone a private version of n
  // in preheader_ctrl block and return that, otherwise return n.
  Node *clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones );

  // Take steps to maximally unroll the loop. Peel any odd iterations, then
  // unroll to do double iterations. The next round of major loop transforms
  // will repeat till the doubled loop body does all remaining iterations in 1
  // pass.
  void do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new );

  // Unroll the loop body one step - make each trip do 2 iterations.
  void do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip );

  // Return true if exp is a constant times an induction var
  bool is_scaled_iv(Node* exp, Node* iv, BasicType bt, jlong* p_scale, bool* p_short_scale, int depth = 0);

  bool is_iv(Node* exp, Node* iv, BasicType bt);

  // Return true if exp is a scaled induction var plus (or minus) constant
  bool is_scaled_iv_plus_offset(Node* exp, Node* iv, BasicType bt, jlong* p_scale, Node** p_offset, bool* p_short_scale = nullptr, int depth = 0);
  bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset) {
    jlong long_scale;
    if (is_scaled_iv_plus_offset(exp, iv, T_INT, &long_scale, p_offset)) {
      int int_scale = checked_cast<int>(long_scale);
      if (p_scale != nullptr) {
        *p_scale = int_scale;
      }
      return true;
    }
    return false;
  }
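  // For example (illustration only): if exp computes 4*iv + 8, a successful
  // match sets *p_scale = 4 and *p_offset to the node producing the
  // constant 8; for exp == iv the scale is 1 and the offset is zero.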
  // Helper for finding more complex matches to is_scaled_iv_plus_offset.
  bool is_scaled_iv_plus_extra_offset(Node* exp1, Node* offset2, Node* iv,
                                      BasicType bt,
                                      jlong* p_scale, Node** p_offset,
                                      bool* p_short_scale, int depth);

  // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted
  IfProjNode* create_new_if_for_predicate(IfProjNode* cont_proj, Node* new_entry, Deoptimization::DeoptReason reason,
                                          int opcode, bool rewire_uncommon_proj_phi_inputs = false,
                                          bool if_cont_is_true_proj = true);

private:
  // Helper functions for create_new_if_for_predicate()
  void set_ctrl_of_nodes_with_same_ctrl(Node* node, ProjNode* old_ctrl, Node* new_ctrl);
  Unique_Node_List find_nodes_with_same_ctrl(Node* node, const ProjNode* ctrl);
  Node* clone_nodes_with_same_ctrl(Node* node, ProjNode* old_ctrl, Node* new_ctrl);
  Dict clone_nodes(const Node_List& list_to_clone);
  void rewire_cloned_nodes_to_ctrl(const ProjNode* old_ctrl, Node* new_ctrl, const Node_List& nodes_with_same_ctrl,
                                   const Dict& old_new_mapping);
  void rewire_inputs_of_clones_to_clones(Node* new_ctrl, Node* clone, const Dict& old_new_mapping, const Node* next);

public:
  void register_control(Node* n, IdealLoopTree *loop, Node* pred, bool update_body = true);

  // Construct a range check for a predicate if
  BoolNode* rc_predicate(IdealLoopTree* loop, Node* ctrl, int scale, Node* offset, Node* init, Node* limit,
                         jint stride, Node* range, bool upper, bool& overflow);

  // Implementation of the loop predication to promote checks outside the loop
  bool loop_predication_impl(IdealLoopTree *loop);
  bool loop_predication_impl_helper(IdealLoopTree* loop, IfProjNode* if_success_proj,
                                    ParsePredicateSuccessProj* parse_predicate_proj, CountedLoopNode* cl, ConNode* zero,
                                    Invariance& invar, Deoptimization::DeoptReason reason);
  bool loop_predication_should_follow_branches(IdealLoopTree* loop, IfProjNode* predicate_proj, float& loop_trip_cnt);
  void loop_predication_follow_branches(Node *c, IdealLoopTree *loop, float loop_trip_cnt,
                                        PathFrequency& pf, Node_Stack& stack, VectorSet& seen,
                                        Node_List& if_proj_list);
  IfProjNode* add_template_assertion_predicate(IfNode* iff, IdealLoopTree* loop, IfProjNode* if_proj, IfProjNode* predicate_proj,
                                               IfProjNode* upper_bound_proj, int scale, Node* offset, Node* init, Node* limit,
                                               jint stride, Node* rng, bool& overflow, Deoptimization::DeoptReason reason);
  Node* add_range_check_elimination_assertion_predicate(IdealLoopTree* loop, Node* predicate_proj, int scale_con,
                                                        Node* offset, Node* limit, jint stride_con, Node* value);

  // Helper function to collect predicates for eliminating the useless ones
  void collect_potentially_useful_predicates(IdealLoopTree *loop, Unique_Node_List &predicate_opaque1);
  void eliminate_useless_predicates();
  void eliminate_useless_zero_trip_guard();

  // Change the control input of expensive nodes to allow commoning by
  // IGVN when it is guaranteed to not result in a more frequent
  // execution of the expensive node. Return true if progress was made.
  bool process_expensive_nodes();

  // Check whether node has become unreachable
  bool is_node_unreachable(Node *n) const {
    return !has_node(n) || n->is_unreachable(_igvn);
  }

  // Eliminate range-checks and other trip-counter vs loop-invariant tests.
  void do_range_check(IdealLoopTree *loop, Node_List &old_new);

  // Create a slow version of the loop by cloning the loop
  // and inserting an if to select fast-slow versions.
  // Return the inserted if.
  IfNode* create_slow_version_of_loop(IdealLoopTree *loop,
                                      Node_List &old_new,
                                      Node_List &unswitch_iffs,
                                      CloneLoopMode mode);

  // Clone a loop and return the clone head (clone_loop_head).
  // Added nodes include int(1), int(0) - disconnected, If, IfTrue, IfFalse.
  // This routine was created for use in CountedLoopReserveKit.
  //
  //    int(1) -> If -> IfTrue -> original_loop_head
  //              |
  //              V
  //           IfFalse -> clone_loop_head (returned by the function)
  //
  LoopNode* create_reserve_version_of_loop(IdealLoopTree *loop, CountedLoopReserveKit* lk);
  // Clone loop with an invariant test (that does not exit) and
  // insert a clone of the test that selects which version to
  // execute.
  void do_unswitching (IdealLoopTree *loop, Node_List &old_new);

  // Find candidate "if" for unswitching
  IfNode* find_unswitching_candidate(const IdealLoopTree *loop, Node_List& unswitch_iffs) const;

  // Range Check Elimination uses this function!
  // Constrain the main loop iterations so the affine function:
  //   low_limit <= scale_con * I + offset < upper_limit
  // always holds true. That is, either increase the number of iterations in
  // the pre-loop or the post-loop until the condition holds true in the main
  // loop. Scale_con, offset and limit are all loop invariant.
  void add_constraint(jlong stride_con, jlong scale_con, Node* offset, Node* low_limit, Node* upper_limit, Node* pre_ctrl, Node** pre_limit, Node** main_limit);
  // Helper function for add_constraint().
  Node* adjust_limit(bool reduce, Node* scale, Node* offset, Node* rc_limit, Node* old_limit, Node* pre_ctrl, bool round);

  // Partially peel loop up through last_peel node.
  bool partial_peel( IdealLoopTree *loop, Node_List &old_new );
  bool duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old_new);

  // Move UnorderedReduction out of loop if possible
  void move_unordered_reduction_out_of_loop(IdealLoopTree* loop);

  // Create a scheduled list of nodes control dependent on ctrl set.
  void scheduled_nodelist( IdealLoopTree *loop, VectorSet& ctrl, Node_List &sched );
  // Has a use in the vector set
  bool has_use_in_set( Node* n, VectorSet& vset );
  // Has a use internal to the vector set (i.e. not in a phi at the loop head)
  bool has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop );
  // Clone "n" for uses that are outside of the loop
  int clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist );
  // Clone "n" for special uses that are in the not_peeled region
  void clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
                                          VectorSet& not_peel, Node_List& sink_list, Node_List& worklist );
  // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
  void insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp );
#ifdef ASSERT
  // Validate the loop partition sets: peel and not_peel
  bool is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list, VectorSet& not_peel );
  // Ensure that uses outside of the loop are of the right form
  bool is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
                                 uint orig_exit_idx, uint clone_exit_idx);
  bool is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx);
#endif

  // Returns nonzero constant stride if the if-node is a possible iv test (otherwise returns zero).
  int stride_of_possible_iv( Node* iff );
  bool is_possible_iv_test( Node* iff ) { return stride_of_possible_iv(iff) != 0; }
  // Return the (unique) control output node that's in the loop (if it exists).
  Node* stay_in_loop( Node* n, IdealLoopTree *loop);
  // Insert a signed compare loop exit cloned from an unsigned compare.
  IfNode* insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop);
  void remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop);
  // Utility to register node "n" with PhaseIdealLoop
  void register_node(Node* n, IdealLoopTree* loop, Node* pred, uint ddepth);
  // Utility to create an if-projection
  ProjNode* proj_clone(ProjNode* p, IfNode* iff);
  // Force the iff control output to be the live_proj
  Node* short_circuit_if(IfNode* iff, ProjNode* live_proj);
  // Insert a region before an if projection
  RegionNode* insert_region_before_proj(ProjNode* proj);
  // Insert a new if before an if projection
  ProjNode* insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj);

  // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
  // "Nearly" because all Nodes have been cloned from the original in the loop,
  // but the fall-in edges to the Cmp are different. Clone bool/Cmp pairs
  // through the Phi recursively, and return a Bool.
  Node* clone_iff(PhiNode* phi);
  CmpNode* clone_bool(PhiNode* phi);


  // Rework addressing expressions to get the most loop-invariant stuff
  // moved out. We'd like to do all associative operators, but it's especially
  // important (common) to do address expressions.
  Node* remix_address_expressions(Node* n);
  Node* remix_address_expressions_add_left_shift(Node* n, IdealLoopTree* n_loop, Node* n_ctrl, BasicType bt);

  // Convert add to muladd to generate MuladdS2I under certain criteria
  Node * convert_add_to_muladd(Node * n);

  // Attempt to use a conditional move instead of a phi/branch
  Node *conditional_move( Node *n );

  // Check for aggressive application of 'split-if' optimization,
  // using basic block level info.
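  //
  // As a hedged illustration of the basic split-if shape (the names below
  // are invented for exposition, not taken from this code): an If whose
  // condition depends only on a Phi merging at the If's own Region,
  //
  //    region = Region(pred1, pred2)
  //    phi    = Phi(region, val1, val2)
  //    iff    = If(Bool(Cmp(phi, limit)))
  //
  // can be split up through the Region so that each predecessor receives its
  // own copy of the Cmp/Bool/If testing the corresponding Phi input (val1 or
  // val2), and the If projections are then merged below by new Regions.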
  void split_if_with_blocks ( VectorSet &visited, Node_Stack &nstack);
  Node *split_if_with_blocks_pre ( Node *n );
  void split_if_with_blocks_post( Node *n );
  Node *has_local_phi_input( Node *n );
  // Mark an IfNode as being dominated by a prior test,
  // without actually altering the CFG (and hence IDOM info).
  void dominated_by(IfProjNode* prevdom, IfNode* iff, bool flip = false, bool exclude_loop_predicate = false);

  // Split Node 'n' through merge point
  RegionNode* split_thru_region(Node* n, RegionNode* region);
  // Split Node 'n' through merge point if there is enough win.
  Node *split_thru_phi( Node *n, Node *region, int policy );
  // Found an If getting its condition-code input from a Phi in the
  // same block. Split thru the Region.
  void do_split_if(Node *iff, RegionNode** new_false_region = nullptr, RegionNode** new_true_region = nullptr);

  // Conversion of fill/copy patterns into intrinsic versions
  bool do_intrinsify_fill();
  bool intrinsify_fill(IdealLoopTree* lpt);
  bool match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
                       Node*& shift, Node*& offset);

private:
  // Return a type based on condition control flow
  const TypeInt* filtered_type( Node *n, Node* n_ctrl);
  const TypeInt* filtered_type( Node *n ) { return filtered_type(n, nullptr); }
  // Helpers for filtered type
  const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl);

  // Helper functions
  Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache );
  Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true );
  void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true );
  bool split_up( Node *n, Node *blk1, Node *blk2 );
  void sink_use( Node *use, Node *post_loop );
  Node* place_outside_loop(Node* useblock, IdealLoopTree* loop) const;
  Node* try_move_store_before_loop(Node* n, Node *n_ctrl);
  void try_move_store_after_loop(Node* n);
  void move_flat_array_check_out_of_loop(Node* n);
  bool identical_backtoback_ifs(Node *n);
  bool flat_array_element_type_check(Node *n);
  bool can_split_if(Node *n_ctrl);
  bool cannot_split_division(const Node* n, const Node* region) const;
  static bool is_divisor_counted_loop_phi(const Node* divisor, const Node* loop);
  bool loop_phi_backedge_type_contains_zero(const Node* phi_divisor, const Type* zero) const;

  // Determine if a method is too big for a/another round of split-if, based on
  // a magic (approximate) ratio derived from the equally magic constant 35000,
  // previously used for this purpose (but without relating to the node limit).
  bool must_throttle_split_if() {
    uint threshold = C->max_node_limit() * 2 / 5;
    return C->live_nodes() > threshold;
  }

  // A simplistic node request tracking mechanism, where
  //   = UINT_MAX   Request not valid or made final.
  //   < UINT_MAX   Nodes currently requested (estimate).
  uint _nodes_required;

  enum { REQUIRE_MIN = 70 };

  uint nodes_required() const { return _nodes_required; }

  // Given the _currently_ available number of nodes, check whether there is
  // "room" for an additional request or not, considering the already required
  // number of nodes.
  // Return TRUE if the new request exceeds the node budget limit; otherwise
  // return FALSE. Note that this interpretation will act pessimistically on
  // additional requests when new nodes have already been generated since the
  // 'begin'. This behaviour fits with the intention that node
  // estimates/requests should be made upfront.
  bool exceeding_node_budget(uint required = 0) {
    assert(C->live_nodes() < C->max_node_limit(), "sanity");
    uint available = C->max_node_limit() - C->live_nodes();
    return available < required + _nodes_required + REQUIRE_MIN;
  }

  uint require_nodes(uint require, uint minreq = REQUIRE_MIN) {
    precond(require > 0);
    _nodes_required += MAX2(require, minreq);
    return _nodes_required;
  }

  bool may_require_nodes(uint require, uint minreq = REQUIRE_MIN) {
    return !exceeding_node_budget(require) && require_nodes(require, minreq) > 0;
  }

  uint require_nodes_begin() {
    assert(_nodes_required == UINT_MAX, "Bad state (begin).");
    _nodes_required = 0;
    return C->live_nodes();
  }

  // When a node request is final, optionally check that the requested number
  // of nodes was reasonably correct with respect to the number of new nodes
  // introduced since the last 'begin'. Always check that we have not exceeded
  // the maximum node limit.
  void require_nodes_final(uint live_at_begin, bool check_estimate) {
    assert(_nodes_required < UINT_MAX, "Bad state (final).");

#ifdef ASSERT
    if (check_estimate) {
      // Check that the node budget request was not off by too much (x2).
      // Should this be the case, we _surely_ need to improve the estimates
      // used in our budget calculations.
      if (C->live_nodes() - live_at_begin > 2 * _nodes_required) {
        log_info(compilation)("Bad node estimate: actual = %d >> request = %d",
                              C->live_nodes() - live_at_begin, _nodes_required);
      }
    }
#endif
    // Assert that we have stayed within the node budget limit.
    assert(C->live_nodes() < C->max_node_limit(),
           "Exceeding node budget limit: %d + %d > %d (request = %d)",
           C->live_nodes() - live_at_begin, live_at_begin,
           C->max_node_limit(), _nodes_required);

    _nodes_required = UINT_MAX;
  }

  // Clone Parse Predicates to the slow and fast loops when unswitching a loop
  void clone_parse_and_assertion_predicates_to_unswitched_loop(IdealLoopTree* loop, Node_List& old_new,
                                                               IfProjNode*& iffast_pred, IfProjNode*& ifslow_pred);
  IfProjNode* clone_parse_predicate_to_unswitched_loop(ParsePredicateSuccessProj* predicate_proj, Node* new_entry,
                                                       Deoptimization::DeoptReason reason, bool slow_loop);
  void clone_assertion_predicates_to_unswitched_loop(IdealLoopTree* loop, const Node_List& old_new,
                                                     Deoptimization::DeoptReason reason, IfProjNode* old_predicate_proj,
                                                     IfProjNode* iffast_pred, IfProjNode* ifslow_pred);
  IfProjNode* clone_assertion_predicate_for_unswitched_loops(Node* iff, IfProjNode* predicate,
                                                             Deoptimization::DeoptReason reason,
                                                             IfProjNode* output_proj);
  static void check_cloned_parse_predicate_for_unswitching(const Node* new_entry, bool is_fast_loop) PRODUCT_RETURN;

  bool _created_loop_node;
  DEBUG_ONLY(void dump_idoms(Node* early, Node* wrong_lca);)
  NOT_PRODUCT(void dump_idoms_in_reverse(const Node* n, const Node_List& idom_list) const;)

public:
  void set_created_loop_node() { _created_loop_node = true; }
  bool created_loop_node() { return _created_loop_node; }
  void register_new_node(Node* n, Node* blk);

#ifdef ASSERT
  void dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA);
#endif

#ifndef PRODUCT
  void dump() const;
  void dump_idom(Node* n) const { dump_idom(n, 1000); } // For debugging
  void dump_idom(Node* n, uint count) const;
  void get_idoms(Node* n, uint count, Unique_Node_List& idoms) const;
  void dump(IdealLoopTree* loop, uint rpo_idx, Node_List &rpo_list) const;
  IdealLoopTree* get_loop_idx(Node* n) const {
    // Dead nodes have no loop, so return the top level loop instead
    return _loop_or_ctrl[n->_idx] ? (IdealLoopTree*)_loop_or_ctrl[n->_idx] : _ltree_root;
  }
  // Print some stats
  static void print_statistics();
  static int _loop_invokes; // Count of PhaseIdealLoop invokes
  static int _loop_work;    // Sum of PhaseIdealLoop x _unique
  static volatile int _long_loop_candidates;
  static volatile int _long_loop_nests;
  static volatile int _long_loop_counted_loops;
#endif

#ifdef ASSERT
  void verify() const;
  bool verify_idom_and_nodes(Node* root, const PhaseIdealLoop* phase_verify) const;
  bool verify_idom(Node* n, const PhaseIdealLoop* phase_verify) const;
  bool verify_loop_ctrl(Node* n, const PhaseIdealLoop* phase_verify) const;
#endif

  void rpo(Node* start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list) const;

  void check_counted_loop_shape(IdealLoopTree* loop, Node* x, BasicType bt) NOT_DEBUG_RETURN;

  LoopNode* create_inner_head(IdealLoopTree* loop, BaseCountedLoopNode* head, IfNode* exit_test);


  int extract_long_range_checks(const IdealLoopTree* loop, jlong stride_con, int iters_limit, PhiNode* phi,
                                Node_List &range_checks);

  void transform_long_range_checks(int stride_con, const Node_List &range_checks, Node* outer_phi,
                                   Node* inner_iters_actual_int, Node* inner_phi,
                                   Node* iv_add, LoopNode* inner_head);

  Node* get_late_ctrl_with_anti_dep(LoadNode* n, Node* early, Node* LCA);

  bool ctrl_of_use_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop, Node* ctrl);

  bool ctrl_of_all_uses_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop);

  Node* compute_early_ctrl(Node* n, Node* n_ctrl);

  void try_sink_out_of_loop(Node* n);

  Node* clamp(Node* R, Node* L, Node* H);

  bool safe_for_if_replacement(const Node* dom) const;

  void push_pinned_nodes_thru_region(IfNode* dom_if, Node* region);

  bool try_merge_identical_ifs(Node* n);

  void clone_loop_body(const Node_List& body, Node_List &old_new, CloneMap* cm);

  void fix_body_edges(const Node_List &body, IdealLoopTree* loop, const Node_List &old_new, int dd,
                      IdealLoopTree* parent, bool partial);

  void fix_ctrl_uses(const Node_List& body, const IdealLoopTree* loop, Node_List &old_new, CloneLoopMode mode,
                     Node* side_by_side_idom, CloneMap* cm, Node_List &worklist);

  void fix_data_uses(Node_List& body, IdealLoopTree* loop, CloneLoopMode mode, IdealLoopTree* outer_loop,
                     uint new_counter, Node_List& old_new, Node_List& worklist, Node_List*& split_if_set,
                     Node_List*& split_bool_set, Node_List*& split_cex_set);

  void finish_clone_loop(Node_List* split_if_set, Node_List* split_bool_set, Node_List* split_cex_set);

  bool clone_cmp_down(Node* n, const Node* blk1, const Node* blk2);

  void clone_loadklass_nodes_at_cmp_index(const Node* n, Node* cmp, int i);

  bool clone_cmp_loadklass_down(Node* n, const Node* blk1, const Node* blk2);

  bool at_relevant_ctrl(Node* n, const Node* blk1, const Node* blk2);
};


class AutoNodeBudget : public StackObj
{
public:
  enum budget_check_t { BUDGET_CHECK, NO_BUDGET_CHECK };

  AutoNodeBudget(PhaseIdealLoop* phase, budget_check_t chk = BUDGET_CHECK)
    : _phase(phase),
      _check_at_final(chk == BUDGET_CHECK),
      _nodes_at_begin(0)
  {
    precond(_phase != nullptr);

    _nodes_at_begin = _phase->require_nodes_begin();
  }

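  // A usage sketch, not code from this file ('phase' and 'est' are
  // illustrative stand-ins for the current PhaseIdealLoop and a
  // caller-supplied node estimate): the budget object brackets a
  // transformation, and the destructor finalizes the request.
  //
  //   {
  //     AutoNodeBudget node_budget(phase);
  //     if (phase->may_require_nodes(est)) {
  //       // ... perform the transformation ...
  //     }
  //   } // ~AutoNodeBudget() checks the estimate and resets the request.
  //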
  ~AutoNodeBudget() {
#ifndef PRODUCT
    if (TraceLoopOpts) {
      uint request = _phase->nodes_required();
      uint delta = _phase->C->live_nodes() - _nodes_at_begin;

      if (request < delta) {
        tty->print_cr("Exceeding node budget: %d < %d", request, delta);
      } else {
        uint const REQUIRE_MIN = PhaseIdealLoop::REQUIRE_MIN;
        // Identify the worst estimates as "poor" ones.
        if (request > REQUIRE_MIN && delta > 0) {
          if ((delta >  REQUIRE_MIN && request >  3 * delta) ||
              (delta <= REQUIRE_MIN && request > 10 * delta)) {
            tty->print_cr("Poor node estimate: %d >> %d", request, delta);
          }
        }
      }
    }
#endif // PRODUCT
    _phase->require_nodes_final(_nodes_at_begin, _check_at_final);
  }

private:
  PhaseIdealLoop* _phase;
  bool _check_at_final;
  uint _nodes_at_begin;
};


// This kit may be used for making a reserved copy of a loop before the loop
// undergoes irreversible changes.
//
// Function create_reserve() creates a reserved copy (clone) of the loop.
// The reserved copy is created by calling
// PhaseIdealLoop::create_reserve_version_of_loop - see there how
// the original and reserved loops are connected in the outer graph.
// If create_reserve() succeeds, it returns 'true' and _has_reserved is set to 'true'.
//
// By default the reserved copy (clone) of the loop is created as dead code - it is
// dominated in the outer loop by this node chain:
//   intcon(1)->If->IfFalse->reserved_copy.
// The original loop is dominated by the same node chain but the IfTrue projection:
//   intcon(1)->If->IfTrue->original_loop.
//
// In this implementation of CountedLoopReserveKit the ctor calls create_reserve()
// and the dtor checks the _use_new value.
// If _use_new == false, the dtor "switches" control to the reserved copy of the loop
// by simply replacing node intcon(1) with node intcon(0).
//
// Here is a proposed example of usage (see also SuperWord::output in superword.cpp).
//
// void CountedLoopReserveKit_example()
// {
//    CountedLoopReserveKit lrk(phase, lpt, DoReserveCopy /*= true*/); // create local object
//    if (DoReserveCopy && !lrk.has_reserved()) {
//      return; // failed to create reserved loop copy
//    }
//    ...
//    // something is wrong, switch to original loop
//    if (something_is_wrong) return; // ~CountedLoopReserveKit makes the switch
//    ...
//    // everything worked ok, return with the newly modified loop
//    lrk.use_new();
//    return; // ~CountedLoopReserveKit does nothing once use_new() was called
// }
//
// Keep in mind that, by default, if create_reserve() is not followed by use_new(),
// the dtor will "switch to the original" loop, i.e. keep the unmodified reserved
// copy as the live version.
// NOTE: if you modify anything outside of the original loop, this class is no help.
//
class CountedLoopReserveKit {
private:
  PhaseIdealLoop* _phase;
  IdealLoopTree*  _lpt;
  LoopNode*       _lp;
  IfNode*         _iff;
  LoopNode*       _lp_reserved;
  bool            _has_reserved;
  bool            _use_new;
  const bool      _active; // may be set to false in the ctor, then the object is a dummy

public:
  CountedLoopReserveKit(PhaseIdealLoop* phase, IdealLoopTree *loop, bool active);
  ~CountedLoopReserveKit();
  void use_new()          { _use_new = true; }
  void set_iff(IfNode* x) { _iff = x; }
  bool has_reserved() const { return _active && _has_reserved; }
private:
  bool create_reserve();
}; // class CountedLoopReserveKit

inline Node* IdealLoopTree::tail() {
  // Handle lazy update of _tail field.
  if (_tail->in(0) == nullptr) {
    _tail = _phase->get_ctrl(_tail);
  }
  return _tail;
}

inline Node* IdealLoopTree::head() {
  // Handle lazy update of _head field.
  if (_head->in(0) == nullptr) {
    _head = _phase->get_ctrl(_head);
  }
  return _head;
}

// Iterate over the loop tree using a preorder, left-to-right traversal.
//
// Example that visits all counted loops from within PhaseIdealLoop
//
//  for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
//    IdealLoopTree* lpt = iter.current();
//    if (!lpt->is_counted()) continue;
//    ...
class LoopTreeIterator : public StackObj {
private:
  IdealLoopTree* _root;
  IdealLoopTree* _curnt;

public:
  LoopTreeIterator(IdealLoopTree* root) : _root(root), _curnt(root) {}

  bool done() { return _curnt == nullptr; } // Finished iterating?

  void next(); // Advance to next loop tree

  IdealLoopTree* current() { return _curnt; } // Return current value of iterator.
};

// Compute probability of reaching some CFG node from a fixed
// dominating CFG node
class PathFrequency {
private:
  Node* _dom; // frequencies are computed relative to this node
  Node_Stack _stack;
  GrowableArray<float> _freqs_stack; // keep track of intermediate results at regions
  GrowableArray<float> _freqs; // cache frequencies
  PhaseIdealLoop* _phase;

  float check_and_truncate_frequency(float f) {
    assert(f >= 0, "Incorrect frequency");
    // We do not perform an exact (f <= 1) check; this would be error-prone
    // with rounding of floats. Performing a check like (f <= 1+eps) would be
    // of benefit; however, it is not evident how to determine such an eps,
    // given that an arbitrary number of add/mul operations are performed on
    // these frequencies.
    return (f > 1) ? 1 : f;
  }

public:
  PathFrequency(Node* dom, PhaseIdealLoop* phase)
    : _dom(dom), _stack(0), _phase(phase) {
  }

  float to(Node* n);
};

// Utility class to work on predicates.
class Predicates {
public:
  static Node* skip_all_predicates(Node* node);
  static Node* skip_all_predicates(ParsePredicates& parse_predicates);
  static Node* skip_predicates_in_block(ParsePredicateSuccessProj* parse_predicate_success_proj);
  static IfProjNode* next_predicate_proj_in_block(IfProjNode* proj);
  static bool has_profiled_loop_predicates(ParsePredicates& parse_predicates);
};

// Class representing the Parse Predicates that are added as ParsePredicateNodes during parsing.
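//
// A usage sketch (illustrative only; 'entry' is assumed to be a control
// projection at a loop entry from which the predicate chain is searched):
//
//   ParsePredicates parse_predicates(entry);
//   if (parse_predicates.has_any()) {
//     ParsePredicateSuccessProj* proj = parse_predicates.loop_predicate_proj();
//     if (proj != nullptr) {
//       // ... work relative to the Loop Parse Predicate's success projection ...
//     }
//   }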
class ParsePredicates {
private:
  ParsePredicateSuccessProj* _loop_predicate_proj = nullptr;
  ParsePredicateSuccessProj* _profiled_loop_predicate_proj = nullptr;
  ParsePredicateSuccessProj* _loop_limit_check_predicate_proj = nullptr;
  // The success projection of the Parse Predicate that comes first when starting from root.
  ParsePredicateSuccessProj* _top_predicate_proj;
  ParsePredicateSuccessProj* _starting_proj;

  void find_parse_predicate_projections();
  static bool is_uct_proj(Node* node, Deoptimization::DeoptReason deopt_reason);
  static ParsePredicateNode* get_parse_predicate_or_null(Node* proj);
  bool assign_predicate_proj(ParsePredicateSuccessProj* parse_predicate_proj);
public:
  ParsePredicates(Node* starting_proj);

  // Success projection of Loop Parse Predicate.
  ParsePredicateSuccessProj* loop_predicate_proj() {
    return _loop_predicate_proj;
  }

  // Success projection of Profiled Loop Parse Predicate.
  ParsePredicateSuccessProj* profiled_loop_predicate_proj() {
    return _profiled_loop_predicate_proj;
  }

  // Success projection of Loop Limit Check Parse Predicate.
  ParsePredicateSuccessProj* loop_limit_check_predicate_proj() {
    return _loop_limit_check_predicate_proj;
  }

  // Return the success projection of the Parse Predicate that comes first when starting from root.
  ParsePredicateSuccessProj* get_top_predicate_proj() {
    return _top_predicate_proj;
  }

  static bool is_success_proj(Node* node);

  // Are there any Parse Predicates?
  bool has_any() const {
    return _top_predicate_proj != nullptr;
  }

  static bool is_loop_limit_check_predicate_proj(Node* node) {
    ParsePredicateNode* parse_predicate = get_parse_predicate_or_null(node);
    return parse_predicate != nullptr && parse_predicate->deopt_reason() == Deoptimization::DeoptReason::Reason_loop_limit_check;
  }
};
#endif // SHARE_OPTO_LOOPNODE_HPP