1 /* 2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "libadt/vectset.hpp" 26 #include "memory/allocation.inline.hpp" 27 #include "memory/resourceArea.hpp" 28 #include "compiler/compilerDirectives.hpp" 29 #include "opto/block.hpp" 30 #include "opto/cfgnode.hpp" 31 #include "opto/chaitin.hpp" 32 #include "opto/loopnode.hpp" 33 #include "opto/machnode.hpp" 34 #include "opto/matcher.hpp" 35 #include "opto/opcodes.hpp" 36 #include "opto/rootnode.hpp" 37 #include "utilities/copy.hpp" 38 #include "utilities/powerOfTwo.hpp" 39 40 void Block_Array::grow(uint i) { 41 assert(i >= Max(), "Should have been checked before, use maybe_grow?"); 42 DEBUG_ONLY(_limit = i+1); 43 if( i < _size ) return; 44 if( !_size ) { 45 _size = 1; 46 _blocks = (Block**)_arena->Amalloc( _size * sizeof(Block*) ); 47 _blocks[0] = nullptr; 48 } 49 uint old = _size; 50 _size = next_power_of_2(i); 51 _blocks = (Block**)_arena->Arealloc( _blocks, old*sizeof(Block*),_size*sizeof(Block*)); 52 Copy::zero_to_bytes( &_blocks[old], (_size-old)*sizeof(Block*) ); 53 } 54 55 void Block_List::remove(uint i) { 56 assert(i < _cnt, "index out of bounds"); 57 Copy::conjoint_words_to_lower((HeapWord*)&_blocks[i+1], (HeapWord*)&_blocks[i], ((_cnt-i-1)*sizeof(Block*))); 58 pop(); // shrink list by one block 59 } 60 61 void Block_List::insert(uint i, Block *b) { 62 push(b); // grow list by one block 63 Copy::conjoint_words_to_higher((HeapWord*)&_blocks[i], (HeapWord*)&_blocks[i+1], ((_cnt-i-1)*sizeof(Block*))); 64 _blocks[i] = b; 65 } 66 67 #ifndef PRODUCT 68 void Block_List::print() { 69 for (uint i=0; i < size(); i++) { 70 tty->print("B%d ", _blocks[i]->_pre_order); 71 } 72 tty->print("size = %d\n", size()); 73 } 74 #endif 75 76 uint Block::code_alignment() const { 77 // Check for Root block 78 if (_pre_order == 0) return CodeEntryAlignment; 79 // Check for Start block 80 if (_pre_order == 1) return InteriorEntryAlignment; 81 // Check for loop alignment 82 if (has_loop_alignment()) return loop_alignment(); 83 84 return relocInfo::addr_unit(); // no particular alignment 85 } 86 87 uint Block::compute_loop_alignment() { 88 Node *h = head(); 89 int unit_sz = relocInfo::addr_unit(); 90 if (h->is_Loop() && h->as_Loop()->is_inner_loop()) { 91 // Pre- and post-loops have low trip count so do not bother with 92 // NOPs for align loop head. 
The constants are hidden from tuning 93 // but only because my "divide by 4" heuristic surely gets nearly 94 // all possible gain (a "do not align at all" heuristic has a 95 // chance of getting a really tiny gain). 96 if (h->is_CountedLoop() && (h->as_CountedLoop()->is_pre_loop() || 97 h->as_CountedLoop()->is_post_loop())) { 98 return (OptoLoopAlignment > 4*unit_sz) ? (OptoLoopAlignment>>2) : unit_sz; 99 } 100 // Loops with low backedge frequency should not be aligned. 101 Node *n = h->in(LoopNode::LoopBackControl)->in(0); 102 if (n->is_MachIf() && n->as_MachIf()->_prob < 0.01) { 103 return unit_sz; // Loop does not loop, more often than not! 104 } 105 return OptoLoopAlignment; // Otherwise align loop head 106 } 107 108 return unit_sz; // no particular alignment 109 } 110 111 // Compute the size of first 'inst_cnt' instructions in this block. 112 // Return the number of instructions left to compute if the block has 113 // less then 'inst_cnt' instructions. Stop, and return 0 if sum_size 114 // exceeds OptoLoopAlignment. 115 uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt, 116 PhaseRegAlloc* ra) { 117 uint last_inst = number_of_nodes(); 118 for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) { 119 uint inst_size = get_node(j)->size(ra); 120 if( inst_size > 0 ) { 121 inst_cnt--; 122 uint sz = sum_size + inst_size; 123 if( sz <= (uint)OptoLoopAlignment ) { 124 // Compute size of instructions which fit into fetch buffer only 125 // since all inst_cnt instructions will not fit even if we align them. 126 sum_size = sz; 127 } else { 128 return 0; 129 } 130 } 131 } 132 return inst_cnt; 133 } 134 135 uint Block::find_node( const Node *n ) const { 136 for( uint i = 0; i < number_of_nodes(); i++ ) { 137 if( get_node(i) == n ) 138 return i; 139 } 140 ShouldNotReachHere(); 141 return 0; 142 } 143 144 // Find and remove n from block list 145 void Block::find_remove( const Node *n ) { 146 remove_node(find_node(n)); 147 } 148 149 bool Block::contains(const Node *n) const { 150 return _nodes.contains(n); 151 } 152 153 bool Block::is_trivially_unreachable() const { 154 return num_preds() <= 1 && !head()->is_Root() && !head()->is_Start(); 155 } 156 157 // Return empty status of a block. Empty blocks contain only the head, other 158 // ideal nodes, and an optional trailing goto. 159 int Block::is_Empty() const { 160 161 // Root or start block is not considered empty 162 if (head()->is_Root() || head()->is_Start()) { 163 return not_empty; 164 } 165 166 int success_result = completely_empty; 167 int end_idx = number_of_nodes() - 1; 168 169 // Check for ending goto 170 if ((end_idx > 0) && (get_node(end_idx)->is_MachGoto())) { 171 success_result = empty_with_goto; 172 end_idx--; 173 } 174 175 // Unreachable blocks are considered empty 176 if (is_trivially_unreachable()) { 177 return success_result; 178 } 179 180 // Ideal nodes (except BoxLock) are allowable in empty blocks: skip them. Only 181 // Mach and BoxLock nodes turn directly into code via emit(). 182 while ((end_idx > 0) && 183 !get_node(end_idx)->is_Mach() && 184 !get_node(end_idx)->is_BoxLock()) { 185 end_idx--; 186 } 187 188 // No room for any interesting instructions? 189 if (end_idx == 0) { 190 return success_result; 191 } 192 193 return not_empty; 194 } 195 196 // Return true if the block's code implies that it is likely to be 197 // executed infrequently. Check to see if the block ends in a Halt or 198 // a low probability call. 
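// For illustration, typical matches are a block whose tail is the projection of
// a call into a slow-path runtime stub (for example uncommon_trap or a slow
// allocation path), or a block whose final instruction is a Halt.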
199 bool Block::has_uncommon_code() const { 200 Node* en = end(); 201 202 if (en->is_MachGoto()) 203 en = en->in(0); 204 if (en->is_Catch()) 205 en = en->in(0); 206 if (en->is_MachProj() && en->in(0)->is_MachCall()) { 207 MachCallNode* call = en->in(0)->as_MachCall(); 208 if (call->cnt() != COUNT_UNKNOWN && call->cnt() <= PROB_UNLIKELY_MAG(4)) { 209 // This is true for slow-path stubs like new_{instance,array}, 210 // slow_arraycopy, complete_monitor_locking, uncommon_trap. 211 // The magic number corresponds to the probability of an uncommon_trap, 212 // even though it is a count not a probability. 213 return true; 214 } 215 } 216 217 int op = en->is_Mach() ? en->as_Mach()->ideal_Opcode() : en->Opcode(); 218 return op == Op_Halt; 219 } 220 221 // True if block is low enough frequency or guarded by a test which 222 // mostly does not go here. 223 bool PhaseCFG::is_uncommon(const Block* block) { 224 // Initial blocks must never be moved, so are never uncommon. 225 if (block->head()->is_Root() || block->head()->is_Start()) return false; 226 227 // Check for way-low freq 228 if(block->_freq < BLOCK_FREQUENCY(0.00001f) ) return true; 229 230 // Look for code shape indicating uncommon_trap or slow path 231 if (block->has_uncommon_code()) return true; 232 233 const float epsilon = 0.05f; 234 const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon); 235 uint uncommon_preds = 0; 236 uint freq_preds = 0; 237 uint uncommon_for_freq_preds = 0; 238 239 for( uint i=1; i< block->num_preds(); i++ ) { 240 Block* guard = get_block_for_node(block->pred(i)); 241 // Check to see if this block follows its guard 1 time out of 10000 242 // or less. 243 // 244 // See list of magnitude-4 unlikely probabilities in cfgnode.hpp which 245 // we intend to be "uncommon", such as slow-path TLE allocation, 246 // predicted call failure, and uncommon trap triggers. 247 // 248 // Use an epsilon value of 5% to allow for variability in frequency 249 // predictions and floating point calculations. The net effect is 250 // that guard_factor is set to 9500. 251 // 252 // Ignore low-frequency blocks. 253 // The next check is (guard->_freq < 1.e-5 * 9500.). 254 if(guard->_freq*BLOCK_FREQUENCY(guard_factor) < BLOCK_FREQUENCY(0.00001f)) { 255 uncommon_preds++; 256 } else { 257 freq_preds++; 258 if(block->_freq < guard->_freq * guard_factor ) { 259 uncommon_for_freq_preds++; 260 } 261 } 262 } 263 if( block->num_preds() > 1 && 264 // The block is uncommon if all preds are uncommon or 265 (uncommon_preds == (block->num_preds()-1) || 266 // it is uncommon for all frequent preds. 267 uncommon_for_freq_preds == freq_preds) ) { 268 return true; 269 } 270 return false; 271 } 272 273 #ifndef PRODUCT 274 void Block::dump_bidx(const Block* orig, outputStream* st) const { 275 if (_pre_order) st->print("B%d", _pre_order); 276 else st->print("N%d", head()->_idx); 277 278 if (Verbose && orig != this) { 279 // Dump the original block's idx 280 st->print(" ("); 281 orig->dump_bidx(orig, st); 282 st->print(")"); 283 } 284 } 285 286 void Block::dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st) const { 287 if (is_connector()) { 288 for (uint i=1; i<num_preds(); i++) { 289 Block *p = cfg->get_block_for_node(pred(i)); 290 p->dump_pred(cfg, orig, st); 291 } 292 } else { 293 dump_bidx(orig, st); 294 st->print(" "); 295 } 296 } 297 298 void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const { 299 // Print the basic block. 300 dump_bidx(this, st); 301 st->print(": "); 302 303 // Print the outgoing CFG edges. 
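  // Taken together, the header printed below looks roughly like
  //   B12: #  out( B13 B14 ) <- in( B10 B11 )  Freq: 1.234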
304 st->print("#\tout( "); 305 for( uint i=0; i<_num_succs; i++ ) { 306 non_connector_successor(i)->dump_bidx(_succs[i], st); 307 st->print(" "); 308 } 309 310 // Print the incoming CFG edges. 311 st->print(") <- "); 312 if( head()->is_block_start() ) { 313 st->print("in( "); 314 for (uint i=1; i<num_preds(); i++) { 315 Node *s = pred(i); 316 if (cfg != nullptr) { 317 Block *p = cfg->get_block_for_node(s); 318 p->dump_pred(cfg, p, st); 319 } else { 320 while (!s->is_block_start()) { 321 s = s->in(0); 322 } 323 st->print("N%d ", s->_idx ); 324 } 325 } 326 st->print(") "); 327 } else { 328 st->print("BLOCK HEAD IS JUNK "); 329 } 330 331 // Print loop, if any 332 const Block *bhead = this; // Head of self-loop 333 Node *bh = bhead->head(); 334 335 if ((cfg != nullptr) && bh->is_Loop() && !head()->is_Root()) { 336 LoopNode *loop = bh->as_Loop(); 337 const Block *bx = cfg->get_block_for_node(loop->in(LoopNode::LoopBackControl)); 338 while (bx->is_connector()) { 339 bx = cfg->get_block_for_node(bx->pred(1)); 340 } 341 st->print("Loop( B%d-B%d ", bhead->_pre_order, bx->_pre_order); 342 // Dump any loop-specific bits, especially for CountedLoops. 343 loop->dump_spec(st); 344 st->print(")"); 345 } else if (has_loop_alignment()) { 346 st->print("top-of-loop"); 347 } 348 349 // Print frequency and other optimization-relevant information 350 st->print(" Freq: %g",_freq); 351 if( Verbose || WizardMode ) { 352 st->print(" IDom: %d/#%d", _idom ? _idom->_pre_order : 0, _dom_depth); 353 st->print(" RegPressure: %d",_reg_pressure); 354 st->print(" IHRP Index: %d",_ihrp_index); 355 st->print(" FRegPressure: %d",_freg_pressure); 356 st->print(" FHRP Index: %d",_fhrp_index); 357 } 358 st->cr(); 359 } 360 361 void Block::dump() const { 362 dump(nullptr); 363 } 364 365 void Block::dump(const PhaseCFG* cfg) const { 366 dump_head(cfg); 367 for (uint i=0; i< number_of_nodes(); i++) { 368 get_node(i)->dump(); 369 } 370 tty->print("\n"); 371 } 372 #endif 373 374 PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher) 375 : Phase(CFG) 376 , _root(root) 377 , _blocks(arena) 378 , _block_arena(arena) 379 , _regalloc(nullptr) 380 , _scheduling_for_pressure(false) 381 , _matcher(matcher) 382 , _node_to_block_mapping(arena) 383 , _node_latency(nullptr) 384 #ifndef PRODUCT 385 , _trace_opto_pipelining(C->directive()->TraceOptoPipeliningOption) 386 #endif 387 #ifdef ASSERT 388 , _raw_oops(arena) 389 #endif 390 { 391 ResourceMark rm; 392 // I'll need a few machine-specific GotoNodes. Make an Ideal GotoNode, 393 // then Match it into a machine-specific Node. Then clone the machine 394 // Node on demand. 395 Node *x = new GotoNode(nullptr); 396 x->init_req(0, x); 397 _goto = matcher.match_tree(x); 398 assert(_goto != nullptr || C->failure_is_artificial(), ""); 399 if (C->failing()) { 400 return; 401 } 402 _goto->set_req(0,_goto); 403 404 // Build the CFG in Reverse Post Order 405 _number_of_blocks = build_cfg(); 406 _root_block = get_block_for_node(_root); 407 } 408 409 // Build a proper looking CFG. Make every block begin with either a StartNode 410 // or a RegionNode. Make every block end with either a Goto, If or Return. 411 // The RootNode both starts and ends it's own block. Do this with a recursive 412 // backwards walk over the control edges. 
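// Roughly, the walk proceeds as follows: starting at Root, follow each control
// input backwards until a block-ending projection or a block head is reached.
// A block that does not already start at a StartNode or RegionNode gets a
// Region inserted in front of it, and a block that does not already end in a
// Return, If or Goto gets a clone of _goto appended. Control-dependent edges
// into many-input Regions are split by forcing an extra Goto block onto the
// edge, which breaks critical edges.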
uint PhaseCFG::build_cfg() {
  VectorSet visited;

  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(C->live_nodes() >> 1);
  nstack.push(_root, 0);
  uint sum = 0;                 // Counter for blocks

  while (nstack.is_nonempty()) {
    // node and in's index from stack's top
    // 'np' is _root (see above) or RegionNode, StartNode: we push on stack
    // only nodes which point to the start of basic block (see below).
    Node *np = nstack.node();
    // idx > 0, except for the first node (_root) pushed on stack
    // at the beginning when idx == 0.
    // We will use the condition (idx == 0) later to end the build.
    uint idx = nstack.index();
    Node *proj = np->in(idx);
    const Node *x = proj->is_block_proj();
    // Does the block end with a proper block-ending Node? One of Return,
    // If or Goto? (This check should be done for visited nodes also).
    if (x == nullptr) {                    // Does not end right...
      Node *g = _goto->clone();            // Force it to end in a Goto
      g->set_req(0, proj);
      np->set_req(idx, g);
      x = proj = g;
    }
    if (!visited.test_set(x->_idx)) { // Visit this block once
      // Skip any control-pinned nodes in the middle of the block.
      Node *p = proj;
      do {
        proj = p;                   // Update pointer to last Control
        p = p->in(0);               // Move control forward
      } while( !p->is_block_proj() &&
               !p->is_block_start() );
      // Make the block begin with one of Region or StartNode.
      if( !p->is_block_start() ) {
        RegionNode *r = new RegionNode( 2 );
        r->init_req(1, p);          // Insert RegionNode in the way
        proj->set_req(0, r);        // Insert RegionNode in the way
        p = r;
      }
      // 'p' now points to the start of this basic block

      // Put self in array of basic blocks
      Block *bb = new (_block_arena) Block(_block_arena, p);
      map_node_to_block(p, bb);
      map_node_to_block(x, bb);
      if( x != p ) {                // Only for root is x == p
        bb->push_node((Node*)x);
      }
      // Now handle predecessors
      ++sum;                        // Count 1 for self block
      uint cnt = bb->num_preds();
      for (int i = (cnt - 1); i > 0; i-- ) { // For all predecessors
        Node *prevproj = p->in(i);  // Get prior input
        assert( !prevproj->is_Con(), "dead input not removed" );
        // Check to see if p->in(i) is a "control-dependent" CFG edge -
        // i.e., it splits at the source (via an IF or SWITCH) and merges
        // at the destination (via a many-input Region).
        // This breaks critical edges. The RegionNode to start the block
        // will be added when <p,i> is pulled off the node stack.
        if ( cnt > 2 ) {            // Merging many things?
          assert( prevproj == bb->pred(i), "" );
          if(prevproj->is_block_proj() != prevproj) { // Control-dependent edge?
            // Force a block on the control-dependent edge
            Node *g = _goto->clone();       // Force it to end in a Goto
            g->set_req(0,prevproj);
            p->set_req(i,g);
          }
        }
        nstack.push(p, i);  // 'p' is RegionNode or StartNode
      }
    } else { // Post-processing visited nodes
      nstack.pop();                 // remove node from stack
      // Check if it is the first node pushed on stack at the beginning.
489 if (idx == 0) break; // end of the build 490 // Find predecessor basic block 491 Block *pb = get_block_for_node(x); 492 // Insert into nodes array, if not already there 493 if (!has_block(proj)) { 494 assert( x != proj, "" ); 495 // Map basic block of projection 496 map_node_to_block(proj, pb); 497 pb->push_node(proj); 498 } 499 // Insert self as a child of my predecessor block 500 pb->_succs.map(pb->_num_succs++, get_block_for_node(np)); 501 assert( pb->get_node(pb->number_of_nodes() - pb->_num_succs)->is_block_proj(), 502 "too many control users, not a CFG?" ); 503 } 504 } 505 // Return number of basic blocks for all children and self 506 return sum; 507 } 508 509 // Inserts a goto & corresponding basic block between 510 // block[block_no] and its succ_no'th successor block 511 void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) { 512 // get block with block_no 513 assert(block_no < number_of_blocks(), "illegal block number"); 514 Block* in = get_block(block_no); 515 // get successor block succ_no 516 assert(succ_no < in->_num_succs, "illegal successor number"); 517 Block* out = in->_succs[succ_no]; 518 // Compute frequency of the new block. Do this before inserting 519 // new block in case succ_prob() needs to infer the probability from 520 // surrounding blocks. 521 float freq = in->_freq * in->succ_prob(succ_no); 522 // get ProjNode corresponding to the succ_no'th successor of the in block 523 ProjNode* proj = in->get_node(in->number_of_nodes() - in->_num_succs + succ_no)->as_Proj(); 524 // create region for basic block 525 RegionNode* region = new RegionNode(2); 526 region->init_req(1, proj); 527 // setup corresponding basic block 528 Block* block = new (_block_arena) Block(_block_arena, region); 529 map_node_to_block(region, block); 530 C->regalloc()->set_bad(region->_idx); 531 // add a goto node 532 Node* gto = _goto->clone(); // get a new goto node 533 gto->set_req(0, region); 534 // add it to the basic block 535 block->push_node(gto); 536 map_node_to_block(gto, block); 537 C->regalloc()->set_bad(gto->_idx); 538 // hook up successor block 539 block->_succs.map(block->_num_succs++, out); 540 // remap successor's predecessors if necessary 541 for (uint i = 1; i < out->num_preds(); i++) { 542 if (out->pred(i) == proj) out->head()->set_req(i, gto); 543 } 544 // remap predecessor's successor to new block 545 in->_succs.map(succ_no, block); 546 // Set the frequency of the new block 547 block->_freq = freq; 548 // add new basic block to basic block list 549 add_block_at(block_no + 1, block); 550 // Update dominator tree information of the new goto block. 551 block->_idom = in; 552 block->_dom_depth = in->_dom_depth + 1; 553 if (out->_idom != in) { 554 // The successor block was not immediately dominated by the predecessor 555 // block, so there is no dominator subtree to update. 556 return; 557 } 558 // Update immediate dominator of the successor block. 559 out->_idom = block; 560 // Increment the dominator tree depth of the goto block's descendants. These 561 // are found by a depth-first search starting from the successor block. Two 562 // domination properties guarantee that only descendant blocks are visited: 563 // 1) all dominators of a block b must appear in any path from the root to b; 564 // 2) if a block b does not dominate another block b', b cannot dominate any 565 // block reachable from b' either. 566 // The exploration uses header indices as block identifiers, since 567 // Block::_pre_order might not be unique in the context of this function. 
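  // For example, if 'out' was immediately dominated by 'in', the new goto block
  // now immediately dominates 'out', and every block in the dominator subtree
  // rooted at 'out' sits one level deeper, so the search below increments its
  // _dom_depth.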
568 ResourceMark rm; 569 VectorSet descendants; 570 descendants.set(block->head()->_idx); // The goto block is a descendant of itself. 571 Block_List worklist; 572 worklist.push(out); // Start exploring from the successor block. 573 while (worklist.size() > 0) { 574 Block* b = worklist.pop(); 575 // The immediate dominator of b is a descendant, hence b is also a 576 // descendant. Even though all predecessors of b might not have been visited 577 // yet, we know that all dominators of b have been already visited (since 578 // they must appear in any path from the goto block to b). 579 descendants.set(b->head()->_idx); 580 b->_dom_depth++; 581 for (uint i = 0; i < b->_num_succs; i++) { 582 Block* s = b->_succs[i]; 583 if (s != get_root_block() && 584 !descendants.test(s->head()->_idx) && 585 // Do not search below non-descendant successors, since any block 586 // reachable from them cannot be descendant either. 587 descendants.test(s->_idom->head()->_idx)) { 588 worklist.push(s); 589 } 590 } 591 } 592 } 593 594 // Does this block end in a multiway branch that cannot have the default case 595 // flipped for another case? 596 static bool no_flip_branch(Block *b) { 597 int branch_idx = b->number_of_nodes() - b->_num_succs-1; 598 if (branch_idx < 1) { 599 return false; 600 } 601 Node *branch = b->get_node(branch_idx); 602 if (branch->is_Catch()) { 603 return true; 604 } 605 if (branch->is_Mach()) { 606 if (branch->is_MachNullCheck()) { 607 return true; 608 } 609 int iop = branch->as_Mach()->ideal_Opcode(); 610 if (iop == Op_FastLock || iop == Op_FastUnlock) { 611 return true; 612 } 613 // Don't flip if branch has an implicit check. 614 if (branch->as_Mach()->is_TrapBasedCheckNode()) { 615 return true; 616 } 617 } 618 return false; 619 } 620 621 // Check for NeverBranch at block end. This needs to become a GOTO to the 622 // true target. NeverBranch are treated as a conditional branch that always 623 // goes the same direction for most of the optimizer and are used to give a 624 // fake exit path to infinite loops. At this late stage they need to turn 625 // into Goto's so that when you enter the infinite loop you indeed hang. 626 void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) { 627 int end_idx = b->end_idx(); 628 NeverBranchNode* never_branch = b->get_node(end_idx)->as_NeverBranch(); 629 Block* succ = get_block_for_node(never_branch->proj_out(0)->unique_ctrl_out()); 630 Block* dead = get_block_for_node(never_branch->proj_out(1)->unique_ctrl_out()); 631 assert(succ == b->_succs[0] || succ == b->_succs[1], "succ is a successor"); 632 assert(dead == b->_succs[0] || dead == b->_succs[1], "dead is a successor"); 633 634 Node* gto = _goto->clone(); // get a new goto node 635 gto->set_req(0, b->head()); 636 Node *bp = b->get_node(end_idx); 637 b->map_node(gto, end_idx); // Slam over NeverBranch 638 map_node_to_block(gto, b); 639 C->regalloc()->set_bad(gto->_idx); 640 b->pop_node(); // Yank projections 641 b->pop_node(); // Yank projections 642 b->_succs.map(0,succ); // Map only successor 643 b->_num_succs = 1; 644 // remap successor's predecessors if necessary 645 uint j; 646 for (j = 1; j < succ->num_preds(); j++) { 647 if (succ->pred(j)->in(0) == bp) { 648 succ->head()->set_req(j, gto); 649 } 650 } 651 // Kill alternate exit path 652 for (j = 1; j < dead->num_preds(); j++) { 653 if (dead->pred(j)->in(0) == bp) { 654 break; 655 } 656 } 657 // Scan through block, yanking dead path from 658 // all regions and phis. 
659 dead->head()->del_req(j); 660 for (int k = 1; dead->get_node(k)->is_Phi(); k++) { 661 dead->get_node(k)->del_req(j); 662 } 663 } 664 665 // Helper function to move block bx to the slot following b_index. Return 666 // true if the move is successful, otherwise false 667 bool PhaseCFG::move_to_next(Block* bx, uint b_index) { 668 if (bx == nullptr) return false; 669 670 // Return false if bx is already scheduled. 671 uint bx_index = bx->_pre_order; 672 if ((bx_index <= b_index) && (get_block(bx_index) == bx)) { 673 return false; 674 } 675 676 // Find the current index of block bx on the block list 677 bx_index = b_index + 1; 678 while (bx_index < number_of_blocks() && get_block(bx_index) != bx) { 679 bx_index++; 680 } 681 assert(get_block(bx_index) == bx, "block not found"); 682 683 // If the previous block conditionally falls into bx, return false, 684 // because moving bx will create an extra jump. 685 for(uint k = 1; k < bx->num_preds(); k++ ) { 686 Block* pred = get_block_for_node(bx->pred(k)); 687 if (pred == get_block(bx_index - 1)) { 688 if (pred->_num_succs != 1) { 689 return false; 690 } 691 } 692 } 693 694 // Reinsert bx just past block 'b' 695 _blocks.remove(bx_index); 696 _blocks.insert(b_index + 1, bx); 697 return true; 698 } 699 700 // Move empty and uncommon blocks to the end. 701 void PhaseCFG::move_to_end(Block *b, uint i) { 702 int e = b->is_Empty(); 703 if (e != Block::not_empty) { 704 if (e == Block::empty_with_goto) { 705 // Remove the goto, but leave the block. 706 b->pop_node(); 707 } 708 // Mark this block as a connector block, which will cause it to be 709 // ignored in certain functions such as non_connector_successor(). 710 b->set_connector(); 711 } 712 // Move the empty block to the end, and don't recheck. 713 _blocks.remove(i); 714 _blocks.push(b); 715 } 716 717 // Set loop alignment for every block 718 void PhaseCFG::set_loop_alignment() { 719 uint last = number_of_blocks(); 720 assert(get_block(0) == get_root_block(), ""); 721 722 for (uint i = 1; i < last; i++) { 723 Block* block = get_block(i); 724 if (block->head()->is_Loop()) { 725 block->set_loop_alignment(block); 726 } 727 } 728 } 729 730 // Make empty basic blocks to be "connector" blocks, Move uncommon blocks 731 // to the end. 732 void PhaseCFG::remove_empty_blocks() { 733 // Move uncommon blocks to the end 734 uint last = number_of_blocks(); 735 assert(get_block(0) == get_root_block(), ""); 736 737 for (uint i = 1; i < last; i++) { 738 Block* block = get_block(i); 739 if (block->is_connector()) { 740 break; 741 } 742 743 // Check for NeverBranch at block end. This needs to become a GOTO to the 744 // true target. NeverBranch are treated as a conditional branch that 745 // always goes the same direction for most of the optimizer and are used 746 // to give a fake exit path to infinite loops. At this late stage they 747 // need to turn into Goto's so that when you enter the infinite loop you 748 // indeed hang. 749 if (block->get_node(block->end_idx())->is_NeverBranch()) { 750 convert_NeverBranch_to_Goto(block); 751 } 752 753 // Look for uncommon blocks and move to end. 754 if (!C->do_freq_based_layout()) { 755 if (is_uncommon(block)) { 756 move_to_end(block, i); 757 last--; // No longer check for being uncommon! 758 if (no_flip_branch(block)) { // Fall-thru case must follow? 
759 // Find the fall-thru block 760 block = get_block(i); 761 move_to_end(block, i); 762 last--; 763 } 764 // backup block counter post-increment 765 i--; 766 } 767 } 768 } 769 770 // Move empty blocks to the end 771 last = number_of_blocks(); 772 for (uint i = 1; i < last; i++) { 773 Block* block = get_block(i); 774 if (block->is_Empty() != Block::not_empty) { 775 move_to_end(block, i); 776 last--; 777 i--; 778 } 779 } // End of for all blocks 780 } 781 782 Block *PhaseCFG::fixup_trap_based_check(Node *branch, Block *block, int block_pos, Block *bnext) { 783 // Trap based checks must fall through to the successor with 784 // PROB_ALWAYS. 785 // They should be an If with 2 successors. 786 assert(branch->is_MachIf(), "must be If"); 787 assert(block->_num_succs == 2, "must have 2 successors"); 788 789 // Get the If node and the projection for the first successor. 790 MachIfNode *iff = block->get_node(block->number_of_nodes()-3)->as_MachIf(); 791 ProjNode *proj0 = block->get_node(block->number_of_nodes()-2)->as_Proj(); 792 ProjNode *proj1 = block->get_node(block->number_of_nodes()-1)->as_Proj(); 793 ProjNode *projt = (proj0->Opcode() == Op_IfTrue) ? proj0 : proj1; 794 ProjNode *projf = (proj0->Opcode() == Op_IfFalse) ? proj0 : proj1; 795 796 // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1]. 797 assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0"); 798 assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1"); 799 800 ProjNode *proj_always; 801 ProjNode *proj_never; 802 // We must negate the branch if the implicit check doesn't follow 803 // the branch's TRUE path. Then, the new TRUE branch target will 804 // be the old FALSE branch target. 805 if (iff->_prob <= 2*PROB_NEVER) { // There are small rounding errors. 806 proj_never = projt; 807 proj_always = projf; 808 } else { 809 // We must negate the branch if the trap doesn't follow the 810 // branch's TRUE path. Then, the new TRUE branch target will 811 // be the old FALSE branch target. 812 proj_never = projf; 813 proj_always = projt; 814 iff->negate(); 815 } 816 assert(iff->_prob <= 2*PROB_NEVER, "Trap based checks are expected to trap never!"); 817 // Map the successors properly 818 block->_succs.map(0, get_block_for_node(proj_never ->raw_out(0))); // The target of the trap. 819 block->_succs.map(1, get_block_for_node(proj_always->raw_out(0))); // The fall through target. 820 821 if (block->get_node(block->number_of_nodes() - block->_num_succs + 1) != proj_always) { 822 block->map_node(proj_never, block->number_of_nodes() - block->_num_succs + 0); 823 block->map_node(proj_always, block->number_of_nodes() - block->_num_succs + 1); 824 } 825 826 // Place the fall through block after this block. 827 Block *bs1 = block->non_connector_successor(1); 828 if (bs1 != bnext && move_to_next(bs1, block_pos)) { 829 bnext = bs1; 830 } 831 // If the fall through block still is not the next block, insert a goto. 832 if (bs1 != bnext) { 833 insert_goto_at(block_pos, 1); 834 } 835 return bnext; 836 } 837 838 // Fix up the final control flow for basic blocks. 839 void PhaseCFG::fixup_flow() { 840 // Fixup final control flow for the blocks. Remove jump-to-next 841 // block. If neither arm of an IF follows the conditional branch, we 842 // have to add a second jump after the conditional. We place the 843 // TRUE branch target in succs[0] for both GOTOs and IFs. 
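  // For example, when a block ends in an If whose true target happens to be the
  // next block in the layout, the test is negated below so that the following
  // block becomes the fall-through (succs[1]) and the branch explicitly jumps
  // to the remaining target (succs[0]).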
844 for (uint i = 0; i < number_of_blocks(); i++) { 845 Block* block = get_block(i); 846 block->_pre_order = i; // turn pre-order into block-index 847 848 // Connector blocks need no further processing. 849 if (block->is_connector()) { 850 assert((i+1) == number_of_blocks() || get_block(i + 1)->is_connector(), "All connector blocks should sink to the end"); 851 continue; 852 } 853 assert(block->is_Empty() != Block::completely_empty, "Empty blocks should be connectors"); 854 855 Block* bnext = (i < number_of_blocks() - 1) ? get_block(i + 1) : nullptr; 856 Block* bs0 = block->non_connector_successor(0); 857 858 // Check for multi-way branches where I cannot negate the test to 859 // exchange the true and false targets. 860 if (no_flip_branch(block)) { 861 // Find fall through case - if must fall into its target. 862 // Get the index of the branch's first successor. 863 int branch_idx = block->number_of_nodes() - block->_num_succs; 864 865 // The branch is 1 before the branch's first successor. 866 Node *branch = block->get_node(branch_idx-1); 867 868 // Handle no-flip branches which have implicit checks and which require 869 // special block ordering and individual semantics of the 'fall through 870 // case'. 871 if ((TrapBasedNullChecks || TrapBasedRangeChecks) && 872 branch->is_Mach() && branch->as_Mach()->is_TrapBasedCheckNode()) { 873 bnext = fixup_trap_based_check(branch, block, i, bnext); 874 } else { 875 // Else, default handling for no-flip branches 876 for (uint j2 = 0; j2 < block->_num_succs; j2++) { 877 const ProjNode* p = block->get_node(branch_idx + j2)->as_Proj(); 878 if (p->_con == 0) { 879 // successor j2 is fall through case 880 if (block->non_connector_successor(j2) != bnext) { 881 // but it is not the next block => insert a goto 882 insert_goto_at(i, j2); 883 } 884 // Put taken branch in slot 0 885 if (j2 == 0 && block->_num_succs == 2) { 886 // Flip targets in succs map 887 Block *tbs0 = block->_succs[0]; 888 Block *tbs1 = block->_succs[1]; 889 block->_succs.map(0, tbs1); 890 block->_succs.map(1, tbs0); 891 } 892 break; 893 } 894 } 895 } 896 897 // Remove all CatchProjs 898 for (uint j = 0; j < block->_num_succs; j++) { 899 block->pop_node(); 900 } 901 902 } else if (block->_num_succs == 1) { 903 // Block ends in a Goto? 904 if (bnext == bs0) { 905 // We fall into next block; remove the Goto 906 block->pop_node(); 907 } 908 909 } else if(block->_num_succs == 2) { // Block ends in a If? 910 // Get opcode of 1st projection (matches _succs[0]) 911 // Note: Since this basic block has 2 exits, the last 2 nodes must 912 // be projections (in any order), the 3rd last node must be 913 // the IfNode (we have excluded other 2-way exits such as 914 // CatchNodes already). 915 MachNode* iff = block->get_node(block->number_of_nodes() - 3)->as_Mach(); 916 ProjNode* proj0 = block->get_node(block->number_of_nodes() - 2)->as_Proj(); 917 ProjNode* proj1 = block->get_node(block->number_of_nodes() - 1)->as_Proj(); 918 919 // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1]. 920 assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0"); 921 assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1"); 922 923 Block* bs1 = block->non_connector_successor(1); 924 925 // Check for neither successor block following the current 926 // block ending in a conditional. 
If so, move one of the 927 // successors after the current one, provided that the 928 // successor was previously unscheduled, but moveable 929 // (i.e., all paths to it involve a branch). 930 if (!C->do_freq_based_layout() && bnext != bs0 && bnext != bs1) { 931 // Choose the more common successor based on the probability 932 // of the conditional branch. 933 Block* bx = bs0; 934 Block* by = bs1; 935 936 // _prob is the probability of taking the true path. Make 937 // p the probability of taking successor #1. 938 float p = iff->as_MachIf()->_prob; 939 if (proj0->Opcode() == Op_IfTrue) { 940 p = 1.0 - p; 941 } 942 943 // Prefer successor #1 if p > 0.5 944 if (p > PROB_FAIR) { 945 bx = bs1; 946 by = bs0; 947 } 948 949 // Attempt the more common successor first 950 if (move_to_next(bx, i)) { 951 bnext = bx; 952 } else if (move_to_next(by, i)) { 953 bnext = by; 954 } 955 } 956 957 // Check for conditional branching the wrong way. Negate 958 // conditional, if needed, so it falls into the following block 959 // and branches to the not-following block. 960 961 // Check for the next block being in succs[0]. We are going to branch 962 // to succs[0], so we want the fall-thru case as the next block in 963 // succs[1]. 964 if (bnext == bs0) { 965 // Fall-thru case in succs[0], should be in succs[1], so flip targets in _succs map 966 Block* tbs0 = block->_succs[0]; 967 Block* tbs1 = block->_succs[1]; 968 block->_succs.map(0, tbs1); 969 block->_succs.map(1, tbs0); 970 // Flip projection for each target 971 swap(proj0, proj1); 972 } else if(bnext != bs1) { 973 // Need a double-branch 974 // The existing conditional branch need not change. 975 // Add a unconditional branch to the false target. 976 // Alas, it must appear in its own block and adding a 977 // block this late in the game is complicated. Sigh. 978 insert_goto_at(i, 1); 979 } 980 981 // Make sure we TRUE branch to the target 982 if (proj0->Opcode() == Op_IfFalse) { 983 iff->as_MachIf()->negate(); 984 } 985 986 block->pop_node(); // Remove IfFalse & IfTrue projections 987 block->pop_node(); 988 989 } else { 990 // Multi-exit block, e.g. a switch statement 991 // But we don't need to do anything here 992 } 993 } // End of for all blocks 994 } 995 996 void PhaseCFG::remove_unreachable_blocks() { 997 ResourceMark rm; 998 Block_List unreachable; 999 // Initialize worklist of unreachable blocks to be removed. 1000 for (uint i = 0; i < number_of_blocks(); i++) { 1001 Block* block = get_block(i); 1002 assert(block->_pre_order == i, "Block::pre_order does not match block index"); 1003 if (block->is_trivially_unreachable()) { 1004 unreachable.push(block); 1005 } 1006 } 1007 // Now remove all blocks that are transitively unreachable. 1008 while (unreachable.size() > 0) { 1009 Block* dead = unreachable.pop(); 1010 // When this code runs (after PhaseCFG::fixup_flow()), Block::_pre_order 1011 // does not contain pre-order but block-list indices. Ensure they stay 1012 // contiguous by decrementing _pre_order for all elements after 'dead'. 1013 // Block::_rpo does not contain valid reverse post-order indices anymore 1014 // (they are invalidated by block insertions in PhaseCFG::fixup_flow()), 1015 // so there is no need to update them. 1016 for (uint i = dead->_pre_order + 1; i < number_of_blocks(); i++) { 1017 get_block(i)->_pre_order--; 1018 } 1019 _blocks.remove(dead->_pre_order); 1020 _number_of_blocks--; 1021 // Update the successors' predecessor list and push new unreachable blocks. 
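    // For example, if the dead block's only successor is B7, the input of B7's
    // head Region coming from the dead block is deleted; if B7 is then left
    // with a single predecessor (and is neither Root nor Start), it has become
    // trivially unreachable and is queued for removal as well.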
    for (uint i = 0; i < dead->_num_succs; i++) {
      Block* succ = dead->_succs[i];
      Node* head = succ->head();
      for (int j = head->req() - 1; j >= 1; j--) {
        if (get_block_for_node(head->in(j)) == dead) {
          head->del_req(j);
        }
      }
      if (succ->is_trivially_unreachable()) {
        unreachable.push(succ);
      }
    }
  }
}

// postalloc_expand: Expand nodes after register allocation.
//
// postalloc_expand has to be called after register allocation, just
// before output (i.e. scheduling). It only gets called if
// Matcher::require_postalloc_expand is true.
//
// Background:
//
// Nodes that are expanded (one compound node requiring several
// assembler instructions to be implemented split into two or more
// non-compound nodes) after register allocation are not as nice as
// the ones expanded before register allocation - they don't
// participate in optimizations such as global code motion. But after
// register allocation we can expand nodes that use registers which
// are not spillable or registers that are not allocated, because the
// old compound node is simply replaced (in its location in the basic
// block) by a new subgraph which does not contain compound nodes any
// more. The scheduler called during output can later on process these
// non-compound nodes.
//
// Implementation:
//
// Nodes requiring postalloc expand are specified in the ad file by using
// a postalloc_expand statement instead of ins_encode. A postalloc_expand
// contains a single call to an encoding, as does an ins_encode
// statement. Instead of an emit() function a postalloc_expand() function
// is generated that doesn't emit assembler but creates a new
// subgraph. The code below calls this postalloc_expand function for each
// node with the appropriate attribute. This function returns the new
// nodes generated in an array passed in the call. The old node,
// potential MachTemps before it and potential Projs after it then get
// disconnected and replaced by the new nodes. The instruction
// generating the result has to be the last one in the array. In
// general it is assumed that Projs after the expanded node are
// kills. These kills are no longer required after expanding, as
// there are now explicitly visible def-use chains, so the Projs are
// removed. This does not hold for calls: they have not only
// kill-Projs but also Projs defining values. Therefore, Projs after
// the expanded node are removed for all nodes except calls. If a node
// is to be reused, it must be added to the nodes list returned, and it
// will be added again.
//
// Implementing the postalloc_expand function for a node in an enc_class
// is rather tedious. It requires knowledge about many node details, as
// the nodes and the subgraph must be hand-crafted. To simplify this,
// adlc generates some utility variables into the postalloc_expand function,
// e.g., holding the operands as specified by the postalloc_expand encoding
// specification, e.g.:
//   * unsigned idx_<par_name>  holding the index of the node in the ins
//   * Node *n_<par_name>       holding the node loaded from the ins
//   * MachOpnd *op_<par_name>  holding the corresponding operand
//
// The ordering of operands cannot be determined by looking at a rule.
// Especially if a match rule matches several different trees,
// several nodes are generated from one instruct specification with
// different operand orderings. In this case the adlc generated
// variables are the only way to access the ins and operands
// deterministically.
//
// If assigning a register to a node that contains an oop, don't
// forget to call ra_->set_oop() for the node.
void PhaseCFG::postalloc_expand(PhaseRegAlloc* _ra) {
  GrowableArray <Node *> new_nodes(32); // Array with new nodes filled by postalloc_expand function of node.
  GrowableArray <Node *> remove(32);
  GrowableArray <Node *> succs(32);
  unsigned int max_idx = C->unique();   // Remember to distinguish new from old nodes.
  DEBUG_ONLY(bool foundNode = false);

  // for all blocks
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block *b = _blocks[i];
    // For all instructions in the current block.
    for (uint j = 0; j < b->number_of_nodes(); j++) {
      Node *n = b->get_node(j);
      if (n->is_Mach() && n->as_Mach()->requires_postalloc_expand()) {
#ifdef ASSERT
        if (TracePostallocExpand) {
          if (!foundNode) {
            foundNode = true;
            tty->print("POSTALLOC EXPANDING %d %s\n", C->compile_id(),
                       C->method() ? C->method()->name()->as_utf8() : C->stub_name());
          }
          tty->print("  postalloc expanding "); n->dump();
          if (Verbose) {
            tty->print("    with ins:\n");
            for (uint k = 0; k < n->len(); ++k) {
              if (n->in(k)) { tty->print("      "); n->in(k)->dump(); }
            }
          }
        }
#endif
        new_nodes.clear();
        // Collect nodes that have to be removed from the block later on.
        uint req = n->req();
        remove.clear();
        for (uint k = 0; k < req; ++k) {
          if (n->in(k) && n->in(k)->is_MachTemp()) {
            remove.push(n->in(k)); // MachTemps which are inputs to the old node have to be removed.
            n->in(k)->del_req(0);
            j--;
          }
        }

        // Check whether we can allocate enough nodes. We set a fixed limit
        // for the size of postalloc expands with this.
        uint unique_limit = C->unique() + 40;
        if (unique_limit >= _ra->node_regs_max_index()) {
          Compile::current()->record_failure("out of nodes in postalloc expand");
          return;
        }

        // Emit (i.e. generate new nodes).
        n->as_Mach()->postalloc_expand(&new_nodes, _ra);

        assert(C->unique() < unique_limit, "You allocated too many nodes in your postalloc expand.");

        // Disconnect the inputs of the old node.
        //
        // We reuse MachSpillCopy nodes. If we need to expand them, there
        // are many, so reusing pays off. If reused, the node already
        // has the new ins. n must be the last node on new_nodes list.
        if (!n->is_MachSpillCopy()) {
          for (int k = req - 1; k >= 0; --k) {
            n->del_req(k);
          }
        }

#ifdef ASSERT
        // Check that all nodes have proper operands.
        for (int k = 0; k < new_nodes.length(); ++k) {
          if (new_nodes.at(k)->_idx < max_idx || !new_nodes.at(k)->is_Mach()) continue; // old node, Proj ...
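          // Only Mach nodes newly created by this expansion (idx >= max_idx) are
          // checked here; pre-existing nodes and non-Mach nodes such as Projs
          // are skipped by the test above.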
1168 MachNode *m = new_nodes.at(k)->as_Mach(); 1169 for (unsigned int l = 0; l < m->num_opnds(); ++l) { 1170 if (MachOper::notAnOper(m->_opnds[l])) { 1171 outputStream *os = tty; 1172 os->print("Node %s ", m->Name()); 1173 os->print("has invalid opnd %d: %p\n", l, m->_opnds[l]); 1174 assert(0, "Invalid operands, see inline trace in hs_err_pid file."); 1175 } 1176 } 1177 } 1178 #endif 1179 1180 // Collect succs of old node in remove (for projections) and in succs (for 1181 // all other nodes) do _not_ collect projections in remove (but in succs) 1182 // in case the node is a call. We need the projections for calls as they are 1183 // associated with registers (i.e. they are defs). 1184 succs.clear(); 1185 for (DUIterator k = n->outs(); n->has_out(k); k++) { 1186 if (n->out(k)->is_Proj() && !n->is_MachCall() && !n->is_MachBranch()) { 1187 remove.push(n->out(k)); 1188 } else { 1189 succs.push(n->out(k)); 1190 } 1191 } 1192 // Replace old node n as input of its succs by last of the new nodes. 1193 for (int k = 0; k < succs.length(); ++k) { 1194 Node *succ = succs.at(k); 1195 for (uint l = 0; l < succ->req(); ++l) { 1196 if (succ->in(l) == n) { 1197 succ->set_req(l, new_nodes.at(new_nodes.length() - 1)); 1198 } 1199 } 1200 for (uint l = succ->req(); l < succ->len(); ++l) { 1201 if (succ->in(l) == n) { 1202 succ->set_prec(l, new_nodes.at(new_nodes.length() - 1)); 1203 } 1204 } 1205 } 1206 1207 // Index of old node in block. 1208 uint index = b->find_node(n); 1209 // Insert new nodes into block and map them in nodes->blocks array 1210 // and remember last node in n2. 1211 Node *n2 = nullptr; 1212 for (int k = 0; k < new_nodes.length(); ++k) { 1213 n2 = new_nodes.at(k); 1214 b->insert_node(n2, ++index); 1215 map_node_to_block(n2, b); 1216 } 1217 1218 // Add old node n to remove and remove them all from block. 1219 remove.push(n); 1220 j--; 1221 #ifdef ASSERT 1222 if (TracePostallocExpand && Verbose) { 1223 tty->print(" removing:\n"); 1224 for (int k = 0; k < remove.length(); ++k) { 1225 tty->print(" "); remove.at(k)->dump(); 1226 } 1227 tty->print(" inserting:\n"); 1228 for (int k = 0; k < new_nodes.length(); ++k) { 1229 tty->print(" "); new_nodes.at(k)->dump(); 1230 } 1231 } 1232 #endif 1233 for (int k = 0; k < remove.length(); ++k) { 1234 if (b->contains(remove.at(k))) { 1235 b->find_remove(remove.at(k)); 1236 } else { 1237 assert(remove.at(k)->is_Proj() && (remove.at(k)->in(0)->is_MachBranch()), ""); 1238 } 1239 } 1240 // If anything has been inserted (n2 != nullptr), continue after last node inserted. 1241 // This does not always work. Some postalloc expands don't insert any nodes, if they 1242 // do optimizations (e.g., max(x,x)). In this case we decrement j accordingly. 1243 j = n2 ? b->find_node(n2) : j; 1244 } 1245 } 1246 } 1247 1248 #ifdef ASSERT 1249 if (foundNode) { 1250 tty->print("FINISHED %d %s\n", C->compile_id(), 1251 C->method() ? 
C->method()->name()->as_utf8() : C->stub_name()); 1252 tty->flush(); 1253 } 1254 #endif 1255 } 1256 1257 1258 //------------------------------dump------------------------------------------- 1259 #ifndef PRODUCT 1260 void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited ) const { 1261 const Node *x = end->is_block_proj(); 1262 assert( x, "not a CFG" ); 1263 1264 // Do not visit this block again 1265 if( visited.test_set(x->_idx) ) return; 1266 1267 // Skip through this block 1268 const Node *p = x; 1269 do { 1270 p = p->in(0); // Move control forward 1271 assert( !p->is_block_proj() || p->is_Root(), "not a CFG" ); 1272 } while( !p->is_block_start() ); 1273 1274 // Recursively visit 1275 for (uint i = 1; i < p->req(); i++) { 1276 _dump_cfg(p->in(i), visited); 1277 } 1278 1279 // Dump the block 1280 get_block_for_node(p)->dump(this); 1281 } 1282 1283 void PhaseCFG::dump( ) const { 1284 tty->print("\n--- CFG --- %d BBs\n", number_of_blocks()); 1285 if (_blocks.size()) { // Did we do basic-block layout? 1286 for (uint i = 0; i < number_of_blocks(); i++) { 1287 const Block* block = get_block(i); 1288 block->dump(this); 1289 } 1290 } else { // Else do it with a DFS 1291 VectorSet visited(_block_arena); 1292 _dump_cfg(_root,visited); 1293 } 1294 } 1295 1296 void PhaseCFG::dump_headers() { 1297 for (uint i = 0; i < number_of_blocks(); i++) { 1298 Block* block = get_block(i); 1299 if (block != nullptr) { 1300 block->dump_head(this); 1301 } 1302 } 1303 } 1304 #endif // !PRODUCT 1305 1306 #ifdef ASSERT 1307 void PhaseCFG::verify_memory_writer_placement(const Block* b, const Node* n) const { 1308 if (!n->is_memory_writer()) { 1309 return; 1310 } 1311 CFGLoop* home_or_ancestor = find_block_for_node(n->in(0))->_loop; 1312 bool found = false; 1313 do { 1314 if (b->_loop == home_or_ancestor) { 1315 found = true; 1316 break; 1317 } 1318 home_or_ancestor = home_or_ancestor->parent(); 1319 } while (home_or_ancestor != nullptr); 1320 assert(found, "block b is not in n's home loop or an ancestor of it"); 1321 } 1322 1323 void PhaseCFG::verify_dominator_tree() const { 1324 for (uint i = 0; i < number_of_blocks(); i++) { 1325 Block* block = get_block(i); 1326 assert(block->_dom_depth <= number_of_blocks(), "unexpected dominator tree depth"); 1327 if (block == get_root_block()) { 1328 assert(block->_dom_depth == 1, "unexpected root dominator tree depth"); 1329 // The root block does not have an immediate dominator, stop checking. 
      continue;
    }
    assert(block->_idom != nullptr, "non-root blocks must have immediate dominators");
    assert(block->_dom_depth == block->_idom->_dom_depth + 1,
           "the dominator tree depth of a node must succeed that of its immediate dominator");
    assert(block->num_preds() > 2 || block->_idom == get_block_for_node(block->pred(1)),
           "the immediate dominator of a single-predecessor block must be the predecessor");
  }
}

void PhaseCFG::verify() const {
  // Verify sane CFG
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    uint cnt = block->number_of_nodes();
    uint j;
    for (j = 0; j < cnt; j++) {
      Node *n = block->get_node(j);
      assert(get_block_for_node(n) == block, "");
      if (j >= 1 && n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
        assert(j == 1 || block->get_node(j-1)->is_Phi(), "CreateEx must be first instruction in block");
      }
      verify_memory_writer_placement(block, n);
      if (n->needs_anti_dependence_check()) {
        verify_anti_dependences(block, n);
        if (C->failing()) {
          return;
        }
      }
      for (uint k = 0; k < n->req(); k++) {
        Node *def = n->in(k);
        if (def && def != n) {
          Block* def_block = get_block_for_node(def);
          assert(def_block || def->is_Con(), "must have block; constants for debug info ok");
          // Verify that all definitions dominate their uses (except for virtual
          // instructions merging multiple definitions).
          assert(n->is_Root() || n->is_Region() || n->is_Phi() || n->is_MachMerge() ||
                 def_block->dominates(block),
                 "uses must be dominated by definitions");
          // Verify that instructions in the block are in correct order.
          // Uses must follow their definition if they are in the same block.
          // Mostly done to check that MachSpillCopy nodes are placed correctly
          // when CreateEx node is moved in build_ifg_physical().
1373 if (def_block == block && !(block->head()->is_Loop() && n->is_Phi()) && 1374 // See (+++) comment in reg_split.cpp 1375 !(n->jvms() != nullptr && n->jvms()->is_monitor_use(k))) { 1376 bool is_loop = false; 1377 if (n->is_Phi()) { 1378 for (uint l = 1; l < def->req(); l++) { 1379 if (n == def->in(l)) { 1380 is_loop = true; 1381 break; // Some kind of loop 1382 } 1383 } 1384 } 1385 // Uses must be before definition, except if: 1386 // - We are in some kind of loop we already detected 1387 // - We are in infinite loop, where Region may not have been turned into LoopNode 1388 assert(block->find_node(def) < j || 1389 is_loop || 1390 (n->is_Phi() && block->head()->as_Region()->is_in_infinite_subgraph()), 1391 "uses must follow definitions (except in loops)"); 1392 } 1393 } 1394 } 1395 if (n->is_Proj()) { 1396 assert(j >= 1, "a projection cannot be the first instruction in a block"); 1397 Node* pred = block->get_node(j - 1); 1398 Node* parent = n->in(0); 1399 assert(parent != nullptr, "projections must have a parent"); 1400 assert(pred == parent || (pred->is_Proj() && pred->in(0) == parent), 1401 "projections must follow their parents or other sibling projections"); 1402 } 1403 } 1404 1405 j = block->end_idx(); 1406 Node* bp = (Node*)block->get_node(block->number_of_nodes() - 1)->is_block_proj(); 1407 assert(bp, "last instruction must be a block proj"); 1408 assert(bp == block->get_node(j), "wrong number of successors for this block"); 1409 if (bp->is_Catch()) { 1410 while (block->get_node(--j)->is_MachProj()) { 1411 ; 1412 } 1413 assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call"); 1414 } else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) { 1415 assert(block->_num_succs == 2, "Conditional branch must have two targets"); 1416 } 1417 } 1418 verify_dominator_tree(); 1419 } 1420 #endif // ASSERT 1421 1422 UnionFind::UnionFind( uint max ) : _cnt(max), _max(max), _indices(NEW_RESOURCE_ARRAY(uint,max)) { 1423 Copy::zero_to_bytes( _indices, sizeof(uint)*max ); 1424 } 1425 1426 void UnionFind::extend( uint from_idx, uint to_idx ) { 1427 _nesting.check(); // Check if a potential reallocation in the resource arena is safe 1428 if( from_idx >= _max ) { 1429 uint size = 16; 1430 while( size <= from_idx ) size <<=1; 1431 _indices = REALLOC_RESOURCE_ARRAY( uint, _indices, _max, size ); 1432 _max = size; 1433 } 1434 while( _cnt <= from_idx ) _indices[_cnt++] = 0; 1435 _indices[from_idx] = to_idx; 1436 } 1437 1438 void UnionFind::reset( uint max ) { 1439 // Force the Union-Find mapping to be at least this large 1440 extend(max,0); 1441 // Initialize to be the ID mapping. 1442 for( uint i=0; i<max; i++ ) map(i,i); 1443 } 1444 1445 // Straight out of Tarjan's union-find algorithm 1446 uint UnionFind::Find_compress( uint idx ) { 1447 uint cur = idx; 1448 uint next = lookup(cur); 1449 while( next != cur ) { // Scan chain of equivalences 1450 assert( next < cur, "always union smaller" ); 1451 cur = next; // until find a fixed-point 1452 next = lookup(cur); 1453 } 1454 // Core of union-find algorithm: update chain of 1455 // equivalences to be equal to the root. 1456 while( idx != next ) { 1457 uint tmp = lookup(idx); 1458 map(idx, next); 1459 idx = tmp; 1460 } 1461 return idx; 1462 } 1463 1464 // Like Find above, but no path compress, so bad asymptotic behavior 1465 uint UnionFind::Find_const( uint idx ) const { 1466 if( idx == 0 ) return idx; // Ignore the zero idx 1467 // Off the end? 
This can happen during debugging dumps 1468 // when data structures have not finished being updated. 1469 if( idx >= _max ) return idx; 1470 uint next = lookup(idx); 1471 while( next != idx ) { // Scan chain of equivalences 1472 idx = next; // until find a fixed-point 1473 next = lookup(idx); 1474 } 1475 return next; 1476 } 1477 1478 // union 2 sets together. 1479 void UnionFind::Union( uint idx1, uint idx2 ) { 1480 uint src = Find(idx1); 1481 uint dst = Find(idx2); 1482 assert( src, "" ); 1483 assert( dst, "" ); 1484 assert( src < _max, "oob" ); 1485 assert( dst < _max, "oob" ); 1486 assert( src < dst, "always union smaller" ); 1487 map(dst,src); 1488 } 1489 1490 #ifndef PRODUCT 1491 void Trace::dump( ) const { 1492 tty->print_cr("Trace (freq %f)", first_block()->_freq); 1493 for (Block *b = first_block(); b != nullptr; b = next(b)) { 1494 tty->print(" B%d", b->_pre_order); 1495 if (b->head()->is_Loop()) { 1496 tty->print(" (L%d)", b->compute_loop_alignment()); 1497 } 1498 if (b->has_loop_alignment()) { 1499 tty->print(" (T%d)", b->code_alignment()); 1500 } 1501 } 1502 tty->cr(); 1503 } 1504 1505 void CFGEdge::dump( ) const { 1506 tty->print(" B%d --> B%d Freq: %f out:%3d%% in:%3d%% State: ", 1507 from()->_pre_order, to()->_pre_order, freq(), _from_pct, _to_pct); 1508 switch(state()) { 1509 case connected: 1510 tty->print("connected"); 1511 break; 1512 case open: 1513 tty->print("open"); 1514 break; 1515 case interior: 1516 tty->print("interior"); 1517 break; 1518 } 1519 if (infrequent()) { 1520 tty->print(" infrequent"); 1521 } 1522 tty->cr(); 1523 } 1524 #endif 1525 1526 // Comparison function for edges 1527 static int edge_order(CFGEdge **e0, CFGEdge **e1) { 1528 float freq0 = (*e0)->freq(); 1529 float freq1 = (*e1)->freq(); 1530 if (freq0 != freq1) { 1531 return freq0 > freq1 ? -1 : 1; 1532 } 1533 1534 int dist0 = (*e0)->to()->_rpo - (*e0)->from()->_rpo; 1535 int dist1 = (*e1)->to()->_rpo - (*e1)->from()->_rpo; 1536 1537 return dist1 - dist0; 1538 } 1539 1540 // Comparison function for edges 1541 extern "C" int trace_frequency_order(const void *p0, const void *p1) { 1542 Trace *tr0 = *(Trace **) p0; 1543 Trace *tr1 = *(Trace **) p1; 1544 Block *b0 = tr0->first_block(); 1545 Block *b1 = tr1->first_block(); 1546 1547 // The trace of connector blocks goes at the end; 1548 // we only expect one such trace 1549 if (b0->is_connector() != b1->is_connector()) { 1550 return b1->is_connector() ? -1 : 1; 1551 } 1552 1553 // Pull more frequently executed blocks to the beginning 1554 float freq0 = b0->_freq; 1555 float freq1 = b1->_freq; 1556 if (freq0 != freq1) { 1557 return freq0 > freq1 ? -1 : 1; 1558 } 1559 1560 int diff = tr0->first_block()->_rpo - tr1->first_block()->_rpo; 1561 1562 return diff; 1563 } 1564 1565 // Find edges of interest, i.e, those which can fall through. Presumes that 1566 // edges which don't fall through are of low frequency and can be generally 1567 // ignored. Initialize the list of traces. 
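// For example, a chain B2 -> B3 -> B4 in which each link is the unique
// fall-through becomes a single trace [B2 B3 B4]; single-entry connector blocks
// are skipped along the way, and a CFGEdge is recorded for every remaining
// successor that could still fall through, to drive the later trace growing
// and merging passes.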
void PhaseBlockLayout::find_edges() {
  // Walk the blocks, creating edges and Traces
  uint i;
  Trace *tr = nullptr;
  for (i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* b = _cfg.get_block(i);
    tr = new Trace(b, next, prev);
    traces[tr->id()] = tr;

    // All connector blocks should be at the end of the list
    if (b->is_connector()) break;

    // If this block and the next one have a one-to-one successor/predecessor
    // relationship, simply append the next block to this trace.
    int nfallthru = b->num_fall_throughs();
    while (nfallthru == 1 &&
           b->succ_fall_through(0)) {
      Block *n = b->_succs[0];

      // Skip over single-entry connector blocks; we don't want to
      // add them to the trace.
      while (n->is_connector() && n->num_preds() == 1) {
        n = n->_succs[0];
      }

      // We see a merge point, so stop the search for the next block
      if (n->num_preds() != 1) break;

      i++;
      assert(n == _cfg.get_block(i), "expecting next block");
      tr->append(n);
      uf->map(n->_pre_order, tr->id());
      traces[n->_pre_order] = nullptr;
      nfallthru = b->num_fall_throughs();
      b = n;
    }

    if (nfallthru > 0) {
      // Create a CFGEdge for each outgoing
      // edge that could be a fall-through.
      for (uint j = 0; j < b->_num_succs; j++) {
        if (b->succ_fall_through(j)) {
          Block *target = b->non_connector_successor(j);
          float freq = b->_freq * b->succ_prob(j);
          int from_pct = (int) ((100 * freq) / b->_freq);
          float f_to_pct = (100 * freq) / target->_freq;
          int to_pct = (f_to_pct < 100.0) ? (int)f_to_pct : 100;
          edges->append(new CFGEdge(b, target, freq, from_pct, to_pct));
        }
      }
    }
  }

  // Group connector blocks into one trace
  for (i++; i < _cfg.number_of_blocks(); i++) {
    Block *b = _cfg.get_block(i);
    assert(b->is_connector(), "connector blocks at the end");
    tr->append(b);
    uf->map(b->_pre_order, tr->id());
    traces[b->_pre_order] = nullptr;
  }
}

// Union two traces together in uf, and null out the old trace's entry in the list
void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace) {
  uint old_id = old_trace->id();
  uint updated_id = updated_trace->id();

  uint lo_id = updated_id;
  uint hi_id = old_id;

  // If the updated trace's id is greater than the old trace's id, swap values
  // to meet the UnionFind guarantee (always union toward the smaller id).
  if (updated_id > old_id) {
    lo_id = old_id;
    hi_id = updated_id;

    // Fix up the trace ids
    traces[lo_id] = traces[updated_id];
    updated_trace->set_id(lo_id);
  }

  // Union the lower with the higher and remove the pointer
  // to the higher.
  uf->Union(lo_id, hi_id);
  traces[hi_id] = nullptr;
}

// Append traces together via the most frequently executed edges
void PhaseBlockLayout::grow_traces() {
  // Order the edges, and drive the growth of Traces via the most
  // frequently executed edges.
  edges->sort(edge_order);
  for (int i = 0; i < edges->length(); i++) {
    CFGEdge *e = edges->at(i);

    if (e->state() != CFGEdge::open) continue;

    Block *src_block = e->from();
    Block *targ_block = e->to();

    // Don't grow traces along backedges?
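    // (With BlockLayoutRotateLoops disabled, an edge whose target does not
    // come strictly later in reverse post-order is treated as a backedge:
    // it is not used to grow a trace, and the target block is simply marked
    // as its own loop-alignment point.)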
    if (!BlockLayoutRotateLoops) {
      if (targ_block->_rpo <= src_block->_rpo) {
        targ_block->set_loop_alignment(targ_block);
        continue;
      }
    }

    Trace *src_trace = trace(src_block);
    Trace *targ_trace = trace(targ_block);

    // If the edge in question can join two traces at their ends,
    // append one trace to the other.
    if (src_trace->last_block() == src_block) {
      if (src_trace == targ_trace) {
        e->set_state(CFGEdge::interior);
        if (targ_trace->backedge(e)) {
          // Reset i to catch any newly eligible edge
          // (Or we could remember the first "open" edge, and reset there)
          i = 0;
        }
      } else if (targ_trace->first_block() == targ_block) {
        e->set_state(CFGEdge::connected);
        src_trace->append(targ_trace);
        union_traces(src_trace, targ_trace);
      }
    }
  }
}

// Embed one trace into another, if the fork or join points are sufficiently
// balanced.
void PhaseBlockLayout::merge_traces(bool fall_thru_only) {
  // Walk the edge list a second time, looking at unprocessed edges.
  // Fold in diamonds.
  for (int i = 0; i < edges->length(); i++) {
    CFGEdge *e = edges->at(i);

    if (e->state() != CFGEdge::open) continue;
    if (fall_thru_only) {
      if (e->infrequent()) continue;
    }

    Block *src_block = e->from();
    Trace *src_trace = trace(src_block);
    bool src_at_tail = src_trace->last_block() == src_block;

    Block *targ_block = e->to();
    Trace *targ_trace = trace(targ_block);
    bool targ_at_start = targ_trace->first_block() == targ_block;

    if (src_trace == targ_trace) {
      // This may be a loop, but we can't do much about it.
      e->set_state(CFGEdge::interior);
      continue;
    }

    if (fall_thru_only) {
      // If the edge links the middle of two traces, we can't do anything.
      // Mark the edge and continue.
      if (!src_at_tail && !targ_at_start) {
        continue;
      }

      // Don't grow traces along backedges?
      if (!BlockLayoutRotateLoops && (targ_block->_rpo <= src_block->_rpo)) {
        continue;
      }

      // If both ends of the edge are available, why didn't we handle it earlier?
      assert(src_at_tail ^ targ_at_start, "Should have caught this edge earlier.");

      if (targ_at_start) {
        // Insert the "targ" trace in the "src" trace if the insertion point
        // is a two-way branch.
        // A better profitability check is possible, but may not be worth it.
        // Someday, see if this "fork" has an associated "join";
        // then make a policy on merging this trace at the fork or join.
        // For example, other things being equal, it may be better to place this
        // trace at the join point if the "src" trace ends in a two-way branch, but
        // the insertion point is one-way.
        assert(src_block->num_fall_throughs() == 2, "unexpected diamond");
        e->set_state(CFGEdge::connected);
        src_trace->insert_after(src_block, targ_trace);
        union_traces(src_trace, targ_trace);
      } else if (src_at_tail) {
        if (src_trace != trace(_cfg.get_root_block())) {
          e->set_state(CFGEdge::connected);
          targ_trace->insert_before(targ_block, src_trace);
          union_traces(targ_trace, src_trace);
        }
      }
    } else if (e->state() == CFGEdge::open) {
      // Append traces, even without a fall-thru connection.
      // But leave the root entry trace at the beginning of the block list.
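      // (Appending the trace that starts at the root/entry block onto another
      // trace would move the method entry away from the front of the block
      // list; reorder_traces() below asserts that the entry trace is still
      // first.)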
      if (targ_trace != trace(_cfg.get_root_block())) {
        e->set_state(CFGEdge::connected);
        src_trace->append(targ_trace);
        union_traces(src_trace, targ_trace);
      }
    }
  }
}

// Order the sequence of the traces in some desirable way
void PhaseBlockLayout::reorder_traces(int count) {
  Trace** new_traces = NEW_RESOURCE_ARRAY(Trace*, count);
  Block_List worklist;
  int new_count = 0;

  // Compact the traces.
  for (int i = 0; i < count; i++) {
    Trace* tr = traces[i];
    if (tr != nullptr) {
      new_traces[new_count++] = tr;
    }
  }

  // The entry block should be first on the new trace list.
  Trace* tr = trace(_cfg.get_root_block());
  assert(tr == new_traces[0], "entry trace misplaced");

  // Sort the new trace list by frequency
  qsort(new_traces + 1, new_count - 1, sizeof(new_traces[0]), trace_frequency_order);

  // Collect all blocks from existing Traces
  _cfg.clear_blocks();
  for (int i = 0; i < new_count; i++) {
    Trace* tr = new_traces[i];
    if (tr != nullptr) {
      // Push blocks onto the CFG list
      for (Block* b = tr->first_block(); b != nullptr; b = tr->next(b)) {
        _cfg.add_block(b);
      }
    }
  }
}

// Order basic blocks based on frequency
PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg)
: Phase(BlockLayout)
, _cfg(cfg) {
  ResourceMark rm;

  // List of traces
  int size = _cfg.number_of_blocks() + 1;
  traces = NEW_RESOURCE_ARRAY(Trace*, size);
  memset(traces, 0, size*sizeof(Trace*));
  next = NEW_RESOURCE_ARRAY(Block*, size);
  memset(next, 0, size*sizeof(Block*));
  prev = NEW_RESOURCE_ARRAY(Block*, size);
  memset(prev, 0, size*sizeof(Block*));

  // List of edges
  edges = new GrowableArray<CFGEdge*>;

  // Mapping block index --> block_trace
  uf = new UnionFind(size);
  uf->reset(size);

  // Find edges and create traces.
  find_edges();

  // Grow traces at their ends via most frequent edges.
  grow_traces();

  // Merge one trace into another, but only at fall-through points.
  // This may make diamonds and other related shapes in a trace.
  merge_traces(true);

  // Run merge again, allowing two traces to be catenated, even if
  // one does not fall through into the other. This places loosely
  // related traces near each other.
  merge_traces(false);

  // Re-order all the remaining traces by frequency
  reorder_traces(size);

  assert(_cfg.number_of_blocks() >= (uint) (size - 1), "number of blocks can not shrink");
}


// Edge e completes a loop in a trace. If the target block is the head of the
// loop, rotate the loop's blocks so that the loop ends in a conditional branch.
bool Trace::backedge(CFGEdge *e) {
  bool loop_rotated = false;
  Block *src_block = e->from();
  Block *targ_block = e->to();

  assert(last_block() == src_block, "loop discovery at back branch");
  if (first_block() == targ_block) {
    if (BlockLayoutRotateLoops && last_block()->num_fall_throughs() < 2) {
      // Find the last block in the trace that has a conditional
      // branch.
      Block *b;
      for (b = last_block(); b != nullptr; b = prev(b)) {
        if (b->num_fall_throughs() == 2) {
          break;
        }
      }

      if (b != last_block() && b != nullptr) {
        loop_rotated = true;

        // Rotate the loop by doing two-part linked-list surgery.
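        // In effect, a trace <head, ..., b, x, ..., tail> becomes
        // <x, ..., tail, head, ..., b>: the old head is appended after the
        // old tail and the list is cut right after b, so the loop now ends
        // at b, whose conditional branch closes the loop.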
        append(first_block());
        break_loop_after(b);
      }
    }

    // Backbranch to the top of a trace
    // Scroll forward through the trace from the targ_block. If we find
    // a loop head before another loop top, use the loop head alignment.
    for (Block *b = targ_block; b != nullptr; b = next(b)) {
      if (b->has_loop_alignment()) {
        break;
      }
      if (b->head()->is_Loop()) {
        targ_block = b;
        break;
      }
    }

    first_block()->set_loop_alignment(targ_block);

  } else {
    // That loop may already have a loop top (we're reaching it again
    // through the backedge of an outer loop)
    Block* b = prev(targ_block);
    bool has_top = targ_block->head()->is_Loop() && b->has_loop_alignment() && !b->head()->is_Loop();
    if (!has_top) {
      // Backbranch into the middle of a trace
      targ_block->set_loop_alignment(targ_block);
    }
  }

  return loop_rotated;
}